struct nv40_texture_format *tf;
struct nouveau_stateobj *so;
uint32_t txf, txs, txp;
- int swizzled = 0; /*XXX: implement in region code? */
unsigned tex_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;
tf = nv40_fragtex_format(pt->format);
if (!tf) {
return NULL;
}
- if (swizzled) {
+ if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
txp = 0;
} else {
txp = nv40mt->level[0].pitch;
mt->base = *pt;
mt->base.refcount = 1;
mt->base.screen = pscreen;
+ mt->shadow_tex = NULL;
+ mt->shadow_surface = NULL;
nv40_miptree_layout(mt);
+ /* Swizzled textures must be POT */
+ if (pt->width[0] & (pt->width[0] - 1) ||
+ pt->height[0] & (pt->height[0] - 1))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else
+ if (pt->tex_usage & (PIPE_TEXTURE_USAGE_PRIMARY |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else {
+ switch (pt->format) {
+ /* TODO: Figure out which formats can be swizzled */
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ /* XXX: Re-enable when SIFM size limits are fixed */
+ /*case PIPE_FORMAT_R16_SNORM:*/
+ break;
+ default:
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ }
+ }
+
mt->buffer = ws->buffer_create(ws, 256,
PIPE_BUFFER_USAGE_PIXEL |
NOUVEAU_BUFFER_USAGE_TEXTURE,
mt->total_size);
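
As an aside, the linear-versus-swizzled decision made in the texture-creation hunk above can be summed up in a small standalone sketch (my own illustration; the function name and boolean parameters are hypothetical and not part of the patch):

#include <stdbool.h>

/* Hypothetical restatement of the layout choice above: the texture stays
 * swizzled only if it is power-of-two, is not a scanout/display target,
 * and uses one of the whitelisted formats; otherwise it is forced to
 * NOUVEAU_TEXTURE_USAGE_LINEAR. */
static bool would_stay_swizzled(unsigned width, unsigned height,
                                bool scanout, bool format_whitelisted)
{
	bool pot = !(width & (width - 1)) && !(height & (height - 1));

	return pot && !scanout && format_whitelisted;
}

A 640x480 A8R8G8B8 texture, for instance, ends up linear purely because of its non-POT dimensions.
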
if (--pt->refcount)
return;
-
pipe_buffer_reference(pscreen, &mt->buffer, NULL);
for (l = 0; l <= pt->last_level; l++) {
if (mt->level[l].image_offset)
FREE(mt->level[l].image_offset);
}
+ if (mt->shadow_tex) {
+ assert(mt->shadow_surface);
+ pscreen->tex_surface_release(pscreen, &mt->shadow_surface);
+ nv40_miptree_release(pscreen, &mt->shadow_tex);
+ }
+
FREE(mt);
}
ps->status = PIPE_SURFACE_STATUS_DEFINED;
ps->refcount = 1;
ps->winsys = pscreen->winsys;
+ ps->face = face;
+ ps->level = level;
+ ps->zslice = zslice;
if (pt->target == PIPE_TEXTURE_CUBE) {
ps->offset = mt->level[level].image_offset[face];
nv40_surface_map(struct pipe_screen *screen, struct pipe_surface *surface,
unsigned flags )
{
- struct pipe_winsys *ws = screen->winsys;
- void *map;
+ struct pipe_winsys *ws = screen->winsys;
+ struct pipe_surface *surface_to_map;
+ void *map;
+
+ if (!(surface->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
+ struct nv40_miptree *mt = (struct nv40_miptree *)surface->texture;
+
+ if (!mt->shadow_tex) {
+ unsigned old_tex_usage = surface->texture->tex_usage;
+ surface->texture->tex_usage = NOUVEAU_TEXTURE_USAGE_LINEAR;
+ mt->shadow_tex = screen->texture_create(screen, surface->texture);
+ surface->texture->tex_usage = old_tex_usage;
+
+ assert(mt->shadow_tex->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR);
+ mt->shadow_surface = screen->get_tex_surface(screen, mt->shadow_tex,
+ surface->face, surface->level,
+ surface->zslice, surface->usage);
+ }
+
+ surface_to_map = mt->shadow_surface;
+ }
+ else
+ surface_to_map = surface;
- map = ws->buffer_map(ws, surface->buffer, flags);
+ assert(surface_to_map);
+
+ map = ws->buffer_map(ws, surface_to_map->buffer, flags);
if (!map)
return NULL;
- return map + surface->offset;
+ return map + surface_to_map->offset;
}
static void
nv40_surface_unmap(struct pipe_screen *screen, struct pipe_surface *surface)
{
- struct pipe_winsys *ws = screen->winsys;
+ struct pipe_winsys *ws = screen->winsys;
+ struct pipe_surface *surface_to_unmap;
+
+ /* TODO: Copy from shadow just before push buffer is flushed instead.
+ There are probably some programs that map/unmap excessively
+ before rendering. */
+ if (!(surface->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
+ struct nv40_miptree *mt = (struct nv40_miptree *)surface->texture;
+
+ assert(mt->shadow_tex);
- ws->buffer_unmap(ws, surface->buffer);
+ surface_to_unmap = mt->shadow_surface;
+ }
+ else
+ surface_to_unmap = surface;
+
+ assert(surface_to_unmap);
+
+ ws->buffer_unmap(ws, surface_to_unmap->buffer);
+
+ if (surface_to_unmap != surface) {
+ struct nv40_screen *nvscreen = nv40_screen(screen);
+
+ nvscreen->nvws->surface_copy(nvscreen->nvws,
+ surface, 0, 0,
+ surface_to_unmap, 0, 0,
+ surface->width, surface->height);
+ }
}
static void
struct pipe_buffer *buffer;
uint total_size;
+ struct pipe_texture *shadow_tex;
+ struct pipe_surface *shadow_surface;
+
struct {
uint pitch;
uint *image_offset;
#include "nv40_context.h"
+/* floor(log2(i)); feeds the NV40TCL_RT_FORMAT_LOG2_WIDTH/LOG2_HEIGHT fields */
+static INLINE int log2i(int i)
+{
+ int r = 0;
+
+ if (i & 0xffff0000) {
+ i >>= 16;
+ r += 16;
+ }
+ if (i & 0x0000ff00) {
+ i >>= 8;
+ r += 8;
+ }
+ if (i & 0x000000f0) {
+ i >>= 4;
+ r += 4;
+ }
+ if (i & 0x0000000c) {
+ i >>= 2;
+ r += 2;
+ }
+ if (i & 0x00000002) {
+ r += 1;
+ }
+ return r;
+}
+
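As a quick sanity check (my own standalone sketch, not part of the patch), the helper's floor(log2()) behavior can be exercised as below; the bit-scan is restated locally so the snippet compiles on its own:

#include <assert.h>

/* Local restatement of log2i() above, so this check is self-contained. */
static int log2i(int i)
{
	int r = 0;

	if (i & 0xffff0000) { i >>= 16; r += 16; }
	if (i & 0x0000ff00) { i >>= 8;  r += 8;  }
	if (i & 0x000000f0) { i >>= 4;  r += 4;  }
	if (i & 0x0000000c) { i >>= 2;  r += 2;  }
	if (i & 0x00000002) { r += 1; }
	return r;
}

int main(void)
{
	assert(log2i(1)    == 0);
	assert(log2i(256)  == 8);
	assert(log2i(4096) == 12);
	assert(log2i(640)  == 9);	/* non-POT input rounds down */
	return 0;
}

The result is exact for the power-of-two framebuffer dimensions that the swizzled render-target path asserts below, which is all it is ever fed.
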
static boolean
nv40_state_framebuffer_validate(struct nv40_context *nv40)
{
zeta = fb->zsbuf;
}
- rt_format = NV40TCL_RT_FORMAT_TYPE_LINEAR;
+ if (!(rt[0]->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
+ assert(!(fb->width & (fb->width - 1)) && !(fb->height & (fb->height - 1)));
+ for (i = 1; i < fb->num_cbufs; i++)
+ assert(!(rt[i]->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR));
+
+ rt_format = NV40TCL_RT_FORMAT_TYPE_SWIZZLED |
+ log2i(fb->width) << NV40TCL_RT_FORMAT_LOG2_WIDTH_SHIFT |
+ log2i(fb->height) << NV40TCL_RT_FORMAT_LOG2_HEIGHT_SHIFT;
+ }
+ else
+ rt_format = NV40TCL_RT_FORMAT_TYPE_LINEAR;
switch (colour_format) {
case PIPE_FORMAT_A8R8G8B8_UNORM:
switch (format) {
case PIPE_FORMAT_A8_UNORM:
return NV04_CONTEXT_SURFACES_2D_FORMAT_Y8;
+ case PIPE_FORMAT_R16_SNORM:
case PIPE_FORMAT_R5G6B5_UNORM:
return NV04_CONTEXT_SURFACES_2D_FORMAT_R5G6B5;
- case PIPE_FORMAT_R16_SNORM:
- return NV04_CONTEXT_SURFACES_2D_FORMAT_Y16;
case PIPE_FORMAT_X8R8G8B8_UNORM:
case PIPE_FORMAT_A8R8G8B8_UNORM:
return NV04_CONTEXT_SURFACES_2D_FORMAT_A8R8G8B8;