-#ifndef __NOUVEAU_RESOURCE_H__
-#define __NOUVEAU_RESOURCE_H__
+#ifndef __NOUVEAU_BUFFER_H__
+#define __NOUVEAU_BUFFER_H__
#include "util/u_transfer.h"
#include "util/u_double_list.h"
LIBNAME = nv50
C_SOURCES = \
- nv50_buffer.c \
nv50_context.c \
nv50_draw.c \
nv50_formats.c \
+++ /dev/null
-
-#include "util/u_inlines.h"
-#include "util/u_memory.h"
-#include "util/u_math.h"
-
-#define NOUVEAU_NVC0
-#include "nouveau/nouveau_screen.h"
-#include "nouveau/nouveau_winsys.h"
-#include "nouveau/nouveau_mm.h"
-#undef NOUVEAU_NVC0
-
-#include "nv50_context.h"
-#include "nv50_resource.h"
-
-struct nv50_transfer {
- struct pipe_transfer base;
-};
-
-static INLINE struct nv50_transfer *
-nv50_transfer(struct pipe_transfer *transfer)
-{
- return (struct nv50_transfer *)transfer;
-}
-
-static INLINE boolean
-nv50_buffer_allocate(struct nv50_screen *screen, struct nv50_resource *buf,
- unsigned domain)
-{
- if (domain == NOUVEAU_BO_VRAM) {
- buf->mm = nouveau_mm_allocate(screen->base.mm_VRAM, buf->base.width0,
- &buf->bo, &buf->offset);
- if (!buf->bo)
- return nv50_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
- } else
- if (domain == NOUVEAU_BO_GART) {
- buf->mm = nouveau_mm_allocate(screen->base.mm_GART, buf->base.width0,
- &buf->bo, &buf->offset);
- if (!buf->bo)
- return FALSE;
- }
- if (domain != NOUVEAU_BO_GART) {
- if (!buf->data) {
- buf->data = MALLOC(buf->base.width0);
- if (!buf->data)
- return FALSE;
- }
- }
- buf->domain = domain;
- return TRUE;
-}
-
-static INLINE void
-release_allocation(struct nouveau_mm_allocation **mm, struct nouveau_fence *fence)
-{
- nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
- (*mm) = NULL;
-}
-
-INLINE void
-nv50_buffer_release_gpu_storage(struct nv50_resource *buf)
-{
- nouveau_bo_ref(NULL, &buf->bo);
-
- if (buf->mm)
- release_allocation(&buf->mm, buf->fence);
-
- buf->domain = 0;
-}
-
-static INLINE boolean
-nv50_buffer_reallocate(struct nv50_screen *screen, struct nv50_resource *buf,
- unsigned domain)
-{
- nv50_buffer_release_gpu_storage(buf);
-
- return nv50_buffer_allocate(screen, buf, domain);
-}
-
-static void
-nv50_buffer_destroy(struct pipe_screen *pscreen,
- struct pipe_resource *presource)
-{
- struct nv50_resource *res = nv50_resource(presource);
-
- nv50_buffer_release_gpu_storage(res);
-
- if (res->data && !(res->status & NV50_BUFFER_STATUS_USER_MEMORY))
- FREE(res->data);
-
- FREE(res);
-}
-
-/* Maybe just migrate to GART right away if we actually need to do this. */
-boolean
-nv50_buffer_download(struct nv50_context *nv50, struct nv50_resource *buf,
- unsigned start, unsigned size)
-{
- struct nouveau_mm_allocation *mm;
- struct nouveau_bo *bounce = NULL;
- uint32_t offset;
-
- assert(buf->domain == NOUVEAU_BO_VRAM);
-
- mm = nouveau_mm_allocate(nv50->screen->base.mm_GART, size, &bounce, &offset);
- if (!bounce)
- return FALSE;
-
- nv50_m2mf_copy_linear(nv50, bounce, offset, NOUVEAU_BO_GART,
- buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
- size);
-
- if (nouveau_bo_map_range(bounce, offset, size, NOUVEAU_BO_RD))
- return FALSE;
- memcpy(buf->data + start, bounce->map, size);
- nouveau_bo_unmap(bounce);
-
- buf->status &= ~NV50_BUFFER_STATUS_DIRTY;
-
- nouveau_bo_ref(NULL, &bounce);
- if (mm)
- nouveau_mm_free(mm);
- return TRUE;
-}
-
-static boolean
-nv50_buffer_upload(struct nv50_context *nv50, struct nv50_resource *buf,
- unsigned start, unsigned size)
-{
- struct nouveau_mm_allocation *mm;
- struct nouveau_bo *bounce = NULL;
- uint32_t offset;
-
- if (size <= 192) {
- nv50_sifc_linear_u8(nv50, buf->bo, buf->domain, buf->offset + start,
- size, buf->data + start);
- return TRUE;
- }
-
- mm = nouveau_mm_allocate(nv50->screen->base.mm_GART, size, &bounce, &offset);
- if (!bounce)
- return FALSE;
-
- nouveau_bo_map_range(bounce, offset, size,
- NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
- memcpy(bounce->map, buf->data + start, size);
- nouveau_bo_unmap(bounce);
-
- nv50_m2mf_copy_linear(nv50, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
- bounce, offset, NOUVEAU_BO_GART, size);
-
- nouveau_bo_ref(NULL, &bounce);
- if (mm)
- release_allocation(&mm, nv50->screen->base.fence.current);
-
- if (start == 0 && size == buf->base.width0)
- buf->status &= ~NV50_BUFFER_STATUS_DIRTY;
- return TRUE;
-}
-
-static struct pipe_transfer *
-nv50_buffer_transfer_get(struct pipe_context *pipe,
- struct pipe_resource *resource,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box)
-{
- struct nv50_resource *buf = nv50_resource(resource);
- struct nv50_transfer *xfr = CALLOC_STRUCT(nv50_transfer);
- if (!xfr)
- return NULL;
-
- xfr->base.resource = resource;
- xfr->base.box.x = box->x;
- xfr->base.box.width = box->width;
- xfr->base.usage = usage;
-
- if (buf->domain == NOUVEAU_BO_VRAM) {
- if (usage & PIPE_TRANSFER_READ) {
- if (buf->status & NV50_BUFFER_STATUS_DIRTY)
- nv50_buffer_download(nv50_context(pipe), buf, 0, buf->base.width0);
- }
- }
-
- return &xfr->base;
-}
-
-static void
-nv50_buffer_transfer_destroy(struct pipe_context *pipe,
- struct pipe_transfer *transfer)
-{
- struct nv50_resource *buf = nv50_resource(transfer->resource);
- struct nv50_transfer *xfr = nv50_transfer(transfer);
-
- if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
- /* writing is worse */
- nv50_buffer_adjust_score(nv50_context(pipe), buf, -5000);
-
- if (buf->domain == NOUVEAU_BO_VRAM) {
- nv50_buffer_upload(nv50_context(pipe), buf,
- transfer->box.x, transfer->box.width);
- }
-
- if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
- PIPE_BIND_INDEX_BUFFER)))
- nv50_context(pipe)->vbo_dirty = TRUE;
- }
-
- FREE(xfr);
-}
-
-static INLINE boolean
-nv50_buffer_sync(struct nv50_resource *buf, unsigned rw)
-{
- if (rw == PIPE_TRANSFER_READ) {
- if (!buf->fence_wr)
- return TRUE;
- if (!nouveau_fence_wait(buf->fence_wr))
- return FALSE;
- } else {
- if (!buf->fence)
- return TRUE;
- if (!nouveau_fence_wait(buf->fence))
- return FALSE;
-
- nouveau_fence_ref(NULL, &buf->fence);
- }
- nouveau_fence_ref(NULL, &buf->fence_wr);
-
- return TRUE;
-}
-
-static INLINE boolean
-nv50_buffer_busy(struct nv50_resource *buf, unsigned rw)
-{
- if (rw == PIPE_TRANSFER_READ)
- return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
- else
- return (buf->fence && !nouveau_fence_signalled(buf->fence));
-}
-
-static void *
-nv50_buffer_transfer_map(struct pipe_context *pipe,
- struct pipe_transfer *transfer)
-{
- struct nv50_transfer *xfr = nv50_transfer(transfer);
- struct nv50_resource *buf = nv50_resource(transfer->resource);
- struct nouveau_bo *bo = buf->bo;
- uint8_t *map;
- int ret;
- uint32_t offset = xfr->base.box.x;
- uint32_t flags;
-
- nv50_buffer_adjust_score(nv50_context(pipe), buf, -250);
-
- if (buf->domain != NOUVEAU_BO_GART)
- return buf->data + offset;
-
- if (buf->mm)
- flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
- else
- flags = nouveau_screen_transfer_flags(xfr->base.usage);
-
- offset += buf->offset;
-
- ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
- if (ret)
- return NULL;
- map = bo->map;
-
- /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
- * not doing so might make future maps fail or trigger "reloc while mapped"
- * errors. For now, mappings to userspace are guaranteed to be persistent.
- */
- nouveau_bo_unmap(bo);
-
- if (buf->mm) {
- if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
- if (nv50_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
- return NULL;
- } else
- if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- nv50_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
- }
- }
- return map;
-}
-
-
-
-static void
-nv50_buffer_transfer_flush_region(struct pipe_context *pipe,
- struct pipe_transfer *transfer,
- const struct pipe_box *box)
-{
- struct nv50_resource *res = nv50_resource(transfer->resource);
- struct nouveau_bo *bo = res->bo;
- unsigned offset = res->offset + transfer->box.x + box->x;
-
- /* not using non-snoop system memory yet, no need for cflush */
- if (1)
- return;
-
- /* XXX: maybe need to upload for VRAM buffers here */
-
- nouveau_screen_bo_map_flush_range(pipe->screen, bo, offset, box->width);
-}
-
-static void
-nv50_buffer_transfer_unmap(struct pipe_context *pipe,
- struct pipe_transfer *transfer)
-{
- /* we've called nouveau_bo_unmap right after map */
-}
-
-const struct u_resource_vtbl nv50_buffer_vtbl =
-{
- u_default_resource_get_handle, /* get_handle */
- nv50_buffer_destroy, /* resource_destroy */
- NULL, /* is_resource_referenced */
- nv50_buffer_transfer_get, /* get_transfer */
- nv50_buffer_transfer_destroy, /* transfer_destroy */
- nv50_buffer_transfer_map, /* transfer_map */
- nv50_buffer_transfer_flush_region, /* transfer_flush_region */
- nv50_buffer_transfer_unmap, /* transfer_unmap */
- u_default_transfer_inline_write /* transfer_inline_write */
-};
-
-struct pipe_resource *
-nv50_buffer_create(struct pipe_screen *pscreen,
- const struct pipe_resource *templ)
-{
- struct nv50_screen *screen = nv50_screen(pscreen);
- struct nv50_resource *buffer;
- boolean ret;
-
- buffer = CALLOC_STRUCT(nv50_resource);
- if (!buffer)
- return NULL;
-
- buffer->base = *templ;
- buffer->vtbl = &nv50_buffer_vtbl;
- pipe_reference_init(&buffer->base.reference, 1);
- buffer->base.screen = pscreen;
-
- if (buffer->base.bind & PIPE_BIND_CONSTANT_BUFFER)
- ret = nv50_buffer_allocate(screen, buffer, 0);
- else
- ret = nv50_buffer_allocate(screen, buffer, NOUVEAU_BO_GART);
-
- if (ret == FALSE)
- goto fail;
-
- return &buffer->base;
-
-fail:
- FREE(buffer);
- return NULL;
-}
-
-
-struct pipe_resource *
-nv50_user_buffer_create(struct pipe_screen *pscreen,
- void *ptr,
- unsigned bytes,
- unsigned bind)
-{
- struct nv50_resource *buffer;
-
- buffer = CALLOC_STRUCT(nv50_resource);
- if (!buffer)
- return NULL;
-
- pipe_reference_init(&buffer->base.reference, 1);
- buffer->vtbl = &nv50_buffer_vtbl;
- buffer->base.screen = pscreen;
- buffer->base.format = PIPE_FORMAT_R8_UNORM;
- buffer->base.usage = PIPE_USAGE_IMMUTABLE;
- buffer->base.bind = bind;
- buffer->base.width0 = bytes;
- buffer->base.height0 = 1;
- buffer->base.depth0 = 1;
-
- buffer->data = ptr;
- buffer->status = NV50_BUFFER_STATUS_USER_MEMORY;
-
- return &buffer->base;
-}
-
-/* Like download, but for GART buffers. Merge ? */
-static INLINE boolean
-nv50_buffer_data_fetch(struct nv50_resource *buf,
- struct nouveau_bo *bo, unsigned offset, unsigned size)
-{
- if (!buf->data) {
- buf->data = MALLOC(size);
- if (!buf->data)
- return FALSE;
- }
- if (nouveau_bo_map_range(bo, offset, size, NOUVEAU_BO_RD))
- return FALSE;
- memcpy(buf->data, bo->map, size);
- nouveau_bo_unmap(bo);
-
- return TRUE;
-}
-
-/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
-boolean
-nv50_buffer_migrate(struct nv50_context *nv50,
- struct nv50_resource *buf, const unsigned new_domain)
-{
- struct nv50_screen *screen = nv50_screen(buf->base.screen);
- struct nouveau_bo *bo;
- const unsigned old_domain = buf->domain;
- unsigned size = buf->base.width0;
- unsigned offset;
- int ret;
-
- assert(new_domain != old_domain);
-
- if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
- if (!nv50_buffer_allocate(screen, buf, new_domain))
- return FALSE;
- ret = nouveau_bo_map_range(buf->bo, buf->offset, size, NOUVEAU_BO_WR |
- NOUVEAU_BO_NOSYNC);
- if (ret)
- return ret;
- memcpy(buf->bo->map, buf->data, size);
- nouveau_bo_unmap(buf->bo);
- FREE(buf->data);
- } else
- if (old_domain != 0 && new_domain != 0) {
- struct nouveau_mm_allocation *mm = buf->mm;
-
- if (new_domain == NOUVEAU_BO_VRAM) {
- /* keep a system memory copy of our data in case we hit a fallback */
- if (!nv50_buffer_data_fetch(buf, buf->bo, buf->offset, size))
- return FALSE;
- debug_printf("migrating %u KiB to VRAM\n", size / 1024);
- }
-
- offset = buf->offset;
- bo = buf->bo;
- buf->bo = NULL;
- buf->mm = NULL;
- nv50_buffer_allocate(screen, buf, new_domain);
-
- nv50_m2mf_copy_linear(nv50, buf->bo, buf->offset, new_domain,
- bo, offset, old_domain, buf->base.width0);
-
- nouveau_bo_ref(NULL, &bo);
- if (mm)
- release_allocation(&mm, screen->base.fence.current);
- } else
- if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
- if (!nv50_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
- return FALSE;
- if (!nv50_buffer_upload(nv50, buf, 0, buf->base.width0))
- return FALSE;
- } else
- return FALSE;
-
- assert(buf->domain == new_domain);
- return TRUE;
-}
-
-/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
- * We'd like to only allocate @size bytes here, but then we'd have to rebase
- * the vertex indices ...
- */
-boolean
-nv50_user_buffer_upload(struct nv50_resource *buf, unsigned base, unsigned size)
-{
- struct nv50_screen *screen = nv50_screen(buf->base.screen);
- int ret;
-
- assert(buf->status & NV50_BUFFER_STATUS_USER_MEMORY);
-
- buf->base.width0 = base + size;
- if (!nv50_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
- return FALSE;
-
- ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
- NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
- if (ret)
- return FALSE;
- memcpy(buf->bo->map, buf->data + base, size);
- nouveau_bo_unmap(buf->bo);
-
- return TRUE;
-}
}
struct resident {
- struct nv50_resource *res;
+ struct nv04_resource *res;
uint32_t flags;
};
void
nv50_bufctx_add_resident(struct nv50_context *nv50, int ctx,
- struct nv50_resource *resource, uint32_t flags)
+ struct nv04_resource *resource, uint32_t flags)
{
struct resident rsd = { resource, flags };
void
nv50_bufctx_del_resident(struct nv50_context *nv50, int ctx,
- struct nv50_resource *resource)
+ struct nv04_resource *resource)
{
struct resident *rsd, *top;
unsigned i;
void nv50_bufctx_emit_relocs(struct nv50_context *);
void nv50_bufctx_add_resident(struct nv50_context *, int ctx,
- struct nv50_resource *, uint32_t flags);
+ struct nv04_resource *, uint32_t flags);
void nv50_bufctx_del_resident(struct nv50_context *, int ctx,
- struct nv50_resource *);
+ struct nv04_resource *);
static INLINE void
nv50_bufctx_reset(struct nv50_context *nv50, int ctx)
{
/* nv50_transfer.c */
void
-nv50_sifc_linear_u8(struct nv50_context *nv50,
- struct nouveau_bo *dst, unsigned domain, int offset,
+nv50_sifc_linear_u8(struct pipe_context *pipe,
+ struct nouveau_bo *dst, unsigned offset, unsigned domain,
unsigned size, void *data);
void
-nv50_m2mf_copy_linear(struct nv50_context *nv50,
+nv50_m2mf_copy_linear(struct pipe_context *pipe,
struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
unsigned size);
for (i = 0; i < nv50->num_vtxbufs; ++i) {
uint8_t *data;
struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
- struct nv50_resource *res = nv50_resource(vb->buffer);
+ struct nv04_resource *res = nv04_resource(vb->buffer);
- data = nv50_resource_map_offset(nv50, res,
- vb->buffer_offset, NOUVEAU_BO_RD);
+ data = nouveau_resource_map_offset(&nv50->pipe, res,
+ vb->buffer_offset, NOUVEAU_BO_RD);
if (apply_bias && likely(!(nv50->vertex->instance_bufs & (1 << i))))
data += info->index_bias * vb->stride;
}
if (info->indexed) {
- ctx.idxbuf = nv50_resource_map_offset(nv50,
- nv50_resource(nv50->idxbuf.buffer),
- nv50->idxbuf.offset, NOUVEAU_BO_RD);
+ ctx.idxbuf = nouveau_resource_map_offset(&nv50->pipe,
+ nv04_resource(nv50->idxbuf.buffer),
+ nv50->idxbuf.offset, NOUVEAU_BO_RD);
if (!ctx.idxbuf)
return;
index_size = nv50->idxbuf.index_size;
}
if (info->indexed)
- nv50_resource_unmap(nv50_resource(nv50->idxbuf.buffer));
+ nouveau_resource_unmap(nv04_resource(nv50->idxbuf.buffer));
for (i = 0; i < nv50->num_vtxbufs; ++i)
- nv50_resource_unmap(nv50_resource(nv50->vtxbuf[i].buffer));
+ nouveau_resource_unmap(nv04_resource(nv50->vtxbuf[i].buffer));
}
struct pipe_resource *resource,
unsigned face, int layer)
{
- struct nv50_resource *res = nv50_resource(resource);
+ struct nv04_resource *res = nv04_resource(resource);
unsigned flags = 0;
unsigned bo_flags = nouveau_bo_pending(res->bo);
{
switch (templ->target) {
case PIPE_BUFFER:
- return nv50_buffer_create(screen, templ);
+ return nouveau_buffer_create(screen, templ);
default:
return nv50_miptree_create(screen, templ);
}
pscreen->resource_from_handle = nv50_resource_from_handle;
pscreen->resource_get_handle = u_resource_get_handle_vtbl;
pscreen->resource_destroy = u_resource_destroy_vtbl;
- pscreen->user_buffer_create = nv50_user_buffer_create;
+ pscreen->user_buffer_create = nouveau_user_buffer_create;
}
#include "util/u_double_list.h"
#define NOUVEAU_NVC0
#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_buffer.h"
#undef NOUVEAU_NVC0
-struct pipe_resource;
-struct nouveau_bo;
-struct nv50_context;
-
-#define NV50_BUFFER_SCORE_MIN -25000
-#define NV50_BUFFER_SCORE_MAX 25000
-#define NV50_BUFFER_SCORE_VRAM_THRESHOLD 20000
-
-/* DIRTY: buffer was (or will be after the next flush) written to by GPU and
- * resource->data has not been updated to reflect modified VRAM contents
- *
- * USER_MEMORY: resource->data is a pointer to client memory and may change
- * between GL calls
- */
-#define NV50_BUFFER_STATUS_DIRTY (1 << 0)
-#define NV50_BUFFER_STATUS_USER_MEMORY (1 << 7)
-
-/* Resources, if mapped into the GPU's address space, are guaranteed to
- * have constant virtual addresses.
- * The address of a resource will lie within the nouveau_bo referenced,
- * and this bo should be added to the memory manager's validation list.
- */
-struct nv50_resource {
- struct pipe_resource base;
- const struct u_resource_vtbl *vtbl;
-
- uint8_t *data;
- struct nouveau_bo *bo;
- uint32_t offset;
-
- uint8_t status;
- uint8_t domain;
-
- int16_t score; /* low if mapped very often, if high can move to VRAM */
-
- struct nouveau_fence *fence;
- struct nouveau_fence *fence_wr;
-
- struct nouveau_mm_allocation *mm;
-};
-
void
-nv50_buffer_release_gpu_storage(struct nv50_resource *);
-
-boolean
-nv50_buffer_download(struct nv50_context *, struct nv50_resource *,
- unsigned start, unsigned size);
-
-boolean
-nv50_buffer_migrate(struct nv50_context *,
- struct nv50_resource *, unsigned domain);
-
-static INLINE void
-nv50_buffer_adjust_score(struct nv50_context *nv50, struct nv50_resource *res,
- int16_t score)
-{
- if (score < 0) {
- if (res->score > NV50_BUFFER_SCORE_MIN)
- res->score += score;
- } else
- if (score > 0){
- if (res->score < NV50_BUFFER_SCORE_MAX)
- res->score += score;
- if (res->domain == NOUVEAU_BO_GART &&
- res->score > NV50_BUFFER_SCORE_VRAM_THRESHOLD)
- nv50_buffer_migrate(nv50, res, NOUVEAU_BO_VRAM);
- }
-}
-
-/* XXX: wait for fence (atm only using this for vertex push) */
-static INLINE void *
-nv50_resource_map_offset(struct nv50_context *nv50,
- struct nv50_resource *res, uint32_t offset,
- uint32_t flags)
-{
- void *map;
-
- nv50_buffer_adjust_score(nv50, res, -250);
-
- if ((res->domain == NOUVEAU_BO_VRAM) &&
- (res->status & NV50_BUFFER_STATUS_DIRTY))
- nv50_buffer_download(nv50, res, 0, res->base.width0);
-
- if ((res->domain != NOUVEAU_BO_GART) ||
- (res->status & NV50_BUFFER_STATUS_USER_MEMORY))
- return res->data + offset;
-
- if (res->mm)
- flags |= NOUVEAU_BO_NOSYNC;
-
- if (nouveau_bo_map_range(res->bo, res->offset + offset,
- res->base.width0, flags))
- return NULL;
-
- map = res->bo->map;
- nouveau_bo_unmap(res->bo);
- return map;
-}
+nv50_init_resource_functions(struct pipe_context *pcontext);
-static INLINE void
-nv50_resource_unmap(struct nv50_resource *res)
-{
- /* no-op */
-}
+void
+nv50_screen_init_resource_functions(struct pipe_screen *pscreen);
#define NV50_TILE_DIM_SHIFT(m, d) (((m) >> (d * 4)) & 0xf)
#define NV50_MAX_TEXTURE_LEVELS 16
struct nv50_miptree {
- struct nv50_resource base;
+ struct nv04_resource base;
struct nv50_miptree_level level[NV50_MAX_TEXTURE_LEVELS];
uint32_t total_size;
uint32_t layer_stride;
return (struct nv50_miptree *)pt;
}
-static INLINE struct nv50_resource *
-nv50_resource(struct pipe_resource *resource)
-{
- return (struct nv50_resource *)resource;
-}
-
-/* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
-static INLINE boolean
-nv50_resource_mapped_by_gpu(struct pipe_resource *resource)
-{
- return nv50_resource(resource)->domain != 0;
-}
-
-void
-nv50_init_resource_functions(struct pipe_context *pcontext);
-
-void
-nv50_screen_init_resource_functions(struct pipe_screen *pscreen);
-
/* Internal functions:
*/
struct pipe_resource *
const struct pipe_resource *template,
struct winsys_handle *whandle);
-struct pipe_resource *
-nv50_buffer_create(struct pipe_screen *pscreen,
- const struct pipe_resource *templ);
-
-struct pipe_resource *
-nv50_user_buffer_create(struct pipe_screen *screen,
- void *ptr,
- unsigned bytes,
- unsigned usage);
-
-
struct pipe_surface *
nv50_miptree_surface_new(struct pipe_context *,
struct pipe_resource *,
void
nv50_miptree_surface_del(struct pipe_context *, struct pipe_surface *);
-boolean
-nv50_user_buffer_upload(struct nv50_resource *, unsigned base, unsigned size);
-
#endif
screen->base.vertex_buffer_flags = screen->base.index_buffer_flags =
NOUVEAU_BO_GART;
+ screen->base.copy_data = nv50_m2mf_copy_linear;
+ screen->base.push_data = nv50_sifc_linear_u8;
ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096,
&screen->fence.bo);
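
The two hook assignments above (`screen->base.copy_data`, `screen->base.push_data`) are the seam between the shared buffer code and the nv50 back end: after this change, generic code reaches the M2MF copy and SIFC push paths only through these per-screen pointers. Below is a minimal sketch, not part of the patch, of an upload routine written purely against those hooks; the helper name is hypothetical, it mirrors the nv50_buffer_upload logic deleted above, and it frees the bounce allocation immediately rather than deferring it behind a fence as the real code does.

/* Illustrative sketch only -- hypothetical helper; assumes the usual
 * nouveau/util headers. Small updates go through the SIFC push hook,
 * larger ones are staged in a GART bounce buffer and copied on the GPU.
 */
static boolean
example_buffer_upload(struct pipe_context *pipe, struct nv04_resource *buf,
                      unsigned start, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(pipe->screen);
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   if (size <= 192) {
      /* small updates: push the bytes inline through the SIFC hook */
      screen->push_data(pipe, buf->bo, buf->offset + start, buf->domain,
                        size, buf->data + start);
      return TRUE;
   }

   /* larger updates: stage through a GART bounce buffer, then copy on GPU */
   mm = nouveau_mm_allocate(screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nouveau_bo_map_range(bounce, offset, size,
                        NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   memcpy(bounce->map, buf->data + start, size);
   nouveau_bo_unmap(bounce);

   screen->copy_data(pipe, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                     bounce, offset, NOUVEAU_BO_GART, size);

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nouveau_mm_free(mm); /* real code would defer this behind a fence */
   return TRUE;
}
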
int nv50_screen_tsc_alloc(struct nv50_screen *, void *);
static INLINE void
-nv50_resource_fence(struct nv50_resource *res, uint32_t flags)
+nv50_resource_fence(struct nv04_resource *res, uint32_t flags)
{
struct nv50_screen *screen = nv50_screen(res->base.screen);
}
static INLINE void
-nv50_resource_validate(struct nv50_resource *res, uint32_t flags)
+nv50_resource_validate(struct nv04_resource *res, uint32_t flags)
{
struct nv50_screen *screen = nv50_screen(res->base.screen);
unsigned s;
for (s = 0; s < 3; ++s) {
- struct nv50_resource *res;
+ struct nv04_resource *res;
int i;
unsigned p, b;
i = ffs(nv50->constbuf_dirty[s]) - 1;
nv50->constbuf_dirty[s] &= ~(1 << i);
- res = nv50_resource(nv50->constbuf[s][i]);
+ res = nv04_resource(nv50->constbuf[s][i]);
if (!res) {
if (i != 0) {
BEGIN_RING(chan, RING_3D(SET_PROGRAM_CB), 1);
assert(0);
- if (!nv50_resource_mapped_by_gpu(&res->base)) {
- nv50_buffer_migrate(nv50, res, NOUVEAU_BO_VRAM);
+ if (!nouveau_resource_mapped_by_gpu(&res->base)) {
+ nouveau_buffer_migrate(&nv50->pipe, res, NOUVEAU_BO_VRAM);
BEGIN_RING(chan, RING_3D(CODE_CB_FLUSH), 1);
OUT_RING (chan, 0);
return FALSE;
prog->code_base = prog->res->start;
- nv50_sifc_linear_u8(nv50, nv50->screen->code, NOUVEAU_BO_VRAM,
- (prog->type << 16) + prog->code_base, prog->code_size,
- prog->code);
+ nv50_sifc_linear_u8(&nv50->pipe, nv50->screen->code,
+ (prog->type << 16) + prog->code_base,
+ NOUVEAU_BO_VRAM, prog->code_size, prog->code);
BEGIN_RING(nv50->screen->base.channel, RING_3D(CODE_CB_FLUSH), 1);
OUT_RING (nv50->screen->base.channel, 0);
if (nv50->constbuf[shader][index])
nv50_bufctx_del_resident(nv50, NV50_BUFCTX_CONSTANT,
- nv50_resource(
- nv50->constbuf[shader][index]));
+ nv04_resource(nv50->constbuf[shader][index]));
pipe_resource_reference(&nv50->constbuf[shader][index], res);
for (i = 0; i < nv50->num_textures[s]; ++i) {
struct nv50_tic_entry *tic = nv50_tic_entry(nv50->textures[s][i]);
- struct nv50_resource *res;
+ struct nv04_resource *res;
if (!tic) {
BEGIN_RING(chan, RING_3D(BIND_TIC(s)), 1);
if (tsc->id < 0) {
tsc->id = nv50_screen_tsc_alloc(nv50->screen, tsc);
- nv50_sifc_linear_u8(nv50, nv50->screen->txc, NOUVEAU_BO_VRAM,
- 65536 + tsc->id * 32, 32, tsc->tsc);
+ nv50_sifc_linear_u8(&nv50->pipe, nv50->screen->txc,
+ 65536 + tsc->id * 32,
+ NOUVEAU_BO_VRAM, 32, tsc->tsc);
need_flush = TRUE;
}
nv50->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);
}
void
-nv50_sifc_linear_u8(struct nv50_context *nv50,
- struct nouveau_bo *dst, unsigned domain, int offset,
+nv50_sifc_linear_u8(struct pipe_context *pipe,
+ struct nouveau_bo *dst, unsigned offset, unsigned domain,
unsigned size, void *data)
{
+ struct nv50_context *nv50 = nv50_context(pipe);
struct nouveau_channel *chan = nv50->screen->base.channel;
uint32_t *src = (uint32_t *)data;
unsigned count = (size + 3) / 4;
}
void
-nv50_m2mf_copy_linear(struct nv50_context *nv50,
+nv50_m2mf_copy_linear(struct pipe_context *pipe,
struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
unsigned size)
{
+ struct nv50_context *nv50 = nv50_context(pipe);
struct nouveau_channel *chan = nv50->screen->base.channel;
BEGIN_RING(chan, RING_MF(LINEAR_IN), 1);
{
const void *data;
struct nouveau_channel *chan = nv50->screen->base.channel;
- struct nv50_resource *res = nv50_resource(vb->buffer);
+ struct nv04_resource *res = nv04_resource(vb->buffer);
float v[4];
const unsigned nc = util_format_get_nr_components(ve->src_format);
- data = nv50_resource_map_offset(nv50, res, vb->buffer_offset +
- ve->src_offset, NOUVEAU_BO_RD);
+ data = nouveau_resource_map_offset(&nv50->pipe, res, vb->buffer_offset +
+ ve->src_offset, NOUVEAU_BO_RD);
util_format_read_4f(ve->src_format, v, 0, data, 0, 0, 0, 1, 1);
nv50_prevalidate_vbufs(struct nv50_context *nv50)
{
struct pipe_vertex_buffer *vb;
- struct nv50_resource *buf;
+ struct nv04_resource *buf;
int i;
uint32_t base, size;
vb = &nv50->vtxbuf[i];
if (!vb->stride)
continue;
- buf = nv50_resource(vb->buffer);
+ buf = nv04_resource(vb->buffer);
/* NOTE: user buffers with temporary storage count as mapped by GPU */
- if (!nv50_resource_mapped_by_gpu(vb->buffer)) {
+ if (!nouveau_resource_mapped_by_gpu(vb->buffer)) {
if (nv50->vbo_push_hint) {
nv50->vbo_fifo = ~0;
continue;
} else {
- if (buf->status & NV50_BUFFER_STATUS_USER_MEMORY) {
+ if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
nv50->vbo_user |= 1 << i;
assert(vb->stride > vb->buffer_offset);
nv50_vbuf_range(nv50, i, &base, &size);
- nv50_user_buffer_upload(buf, base, size);
+ nouveau_user_buffer_upload(buf, base, size);
} else {
- nv50_buffer_migrate(nv50, buf, NOUVEAU_BO_GART);
+ nouveau_buffer_migrate(&nv50->pipe, buf, NOUVEAU_BO_GART);
}
nv50->vbo_dirty = TRUE;
}
}
nv50_bufctx_add_resident(nv50, NV50_BUFCTX_VERTEX, buf, NOUVEAU_BO_RD);
- nv50_buffer_adjust_score(nv50, buf, 1);
+ nouveau_buffer_adjust_score(&nv50->pipe, buf, 1);
}
}
struct pipe_vertex_element *ve = &nv50->vertex->element[i].pipe;
const int b = ve->vertex_buffer_index;
struct pipe_vertex_buffer *vb = &nv50->vtxbuf[b];
- struct nv50_resource *buf = nv50_resource(vb->buffer);
+ struct nv04_resource *buf = nv04_resource(vb->buffer);
if (!(nv50->vbo_user & (1 << b)))
continue;
if (!(written & (1 << b))) {
written |= 1 << b;
- nv50_user_buffer_upload(buf, base, size);
+ nouveau_user_buffer_upload(buf, base, size);
}
offset = vb->buffer_offset + ve->src_offset;
int i = ffs(vbo_user) - 1;
vbo_user &= ~(1 << i);
- nv50_buffer_release_gpu_storage(nv50_resource(nv50->vtxbuf[i].buffer));
+ nouveau_buffer_release_gpu_storage(nv04_resource(nv50->vtxbuf[i].buffer));
}
}
}
for (i = 0; i < vertex->num_elements; ++i) {
- struct nv50_resource *res;
+ struct nv04_resource *res;
unsigned size, offset;
ve = &vertex->element[i];
OUT_RING (chan, 0);
}
- res = nv50_resource(vb->buffer);
+ res = nv04_resource(vb->buffer);
if (nv50->vbo_fifo || unlikely(vb->stride == 0)) {
if (!nv50->vbo_fifo)
nv50->state.index_bias = index_bias;
}
- if (nv50_resource_mapped_by_gpu(nv50->idxbuf.buffer) && 0) {
- struct nv50_resource *res = nv50_resource(nv50->idxbuf.buffer);
+ if (nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer) && 0) {
+ struct nv04_resource *res = nv04_resource(nv50->idxbuf.buffer);
unsigned offset = res->offset + nv50->idxbuf.offset;
- nv50_buffer_adjust_score(nv50, res, 1);
+ nouveau_buffer_adjust_score(&nv50->pipe, res, 1);
while (instance_count--) {
BEGIN_RING(chan, RING_3D(VERTEX_BEGIN_GL), 1);
mode |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
}
} else {
- data = nv50_resource_map_offset(nv50, nv50_resource(nv50->idxbuf.buffer),
- nv50->idxbuf.offset, NOUVEAU_BO_RD);
+ data = nouveau_resource_map_offset(&nv50->pipe,
+ nv04_resource(nv50->idxbuf.buffer),
+ nv50->idxbuf.offset, NOUVEAU_BO_RD);
if (!data)
return;
#include <stdint.h>
#include <unistd.h>
+
#include "pipe/p_defines.h"
#include "nouveau/nouveau_bo.h"
#include "nouveau/nouveau_resource.h"
#include "nouveau/nouveau_pushbuf.h"
#include "nouveau/nouveau_reloc.h"
+#include "nouveau/nouveau_notifier.h"
-#include "nv50_resource.h" /* OUT_RESRC */
+#include "nouveau/nouveau_buffer.h"
#ifndef NV04_PFIFO_MAX_PACKET_LEN
#define NV04_PFIFO_MAX_PACKET_LEN 2047
}
static INLINE int
-OUT_RESRCh(struct nouveau_channel *chan, struct nv50_resource *res,
+OUT_RESRCh(struct nouveau_channel *chan, struct nv04_resource *res,
unsigned delta, unsigned flags)
{
return OUT_RELOCh(chan, res->bo, res->offset + delta, res->domain | flags);
}
static INLINE int
-OUT_RESRCl(struct nouveau_channel *chan, struct nv50_resource *res,
+OUT_RESRCl(struct nouveau_channel *chan, struct nv04_resource *res,
unsigned delta, unsigned flags)
{
if (flags & NOUVEAU_BO_WR)
- res->status |= NV50_BUFFER_STATUS_DIRTY;
+ res->status |= NOUVEAU_BUFFER_STATUS_DIRTY;
return OUT_RELOCl(chan, res->bo, res->offset + delta, res->domain | flags);
}
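
OUT_RESRCl setting NOUVEAU_BUFFER_STATUS_DIRTY on write relocations is what keeps the CPU shadow copy honest: a later map of a VRAM buffer sees the flag and downloads the data before handing out a pointer. A small sketch of the usual emission pattern follows; the method macro is a placeholder, not a real nv50 method name.

/* Sketch only: typical use of the reloc helpers above when programming a
 * high/low buffer address. EXAMPLE_ADDRESS_HIGH is a placeholder method;
 * with NOUVEAU_BO_WR instead of NOUVEAU_BO_RD, OUT_RESRCl would also mark
 * the buffer DIRTY.
 */
static INLINE void
example_emit_buffer_address(struct nouveau_channel *chan,
                            struct nv04_resource *res, unsigned delta)
{
   BEGIN_RING(chan, RING_3D(EXAMPLE_ADDRESS_HIGH), 2); /* placeholder */
   OUT_RESRCh(chan, res, delta, NOUVEAU_BO_RD);
   OUT_RESRCl(chan, res, delta, NOUVEAU_BO_RD);
}
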