LIBNAME = nvc0
C_SOURCES = \
- nvc0_buffer.c \
nvc0_context.c \
nvc0_draw.c \
nvc0_formats.c \
nvc0_pc_regalloc.c \
nvc0_push.c \
nvc0_push2.c \
- nvc0_fence.c \
- nvc0_mm.c \
nvc0_query.c
LIBRARY_INCLUDES = \
+++ /dev/null
-
-#include "util/u_inlines.h"
-#include "util/u_memory.h"
-#include "util/u_math.h"
-
-#define NOUVEAU_NVC0
-#include "nouveau/nouveau_screen.h"
-#include "nouveau/nouveau_winsys.h"
-#undef NOUVEAU_NVC0
-
-#include "nvc0_context.h"
-#include "nvc0_resource.h"
-
-struct nvc0_transfer {
- struct pipe_transfer base;
-};
-
-static INLINE struct nvc0_transfer *
-nvc0_transfer(struct pipe_transfer *transfer)
-{
- return (struct nvc0_transfer *)transfer;
-}
-
-static INLINE boolean
-nvc0_buffer_allocate(struct nvc0_screen *screen, struct nvc0_resource *buf,
- unsigned domain)
-{
- if (domain == NOUVEAU_BO_VRAM) {
- buf->mm = nvc0_mm_allocate(screen->mm_VRAM, buf->base.width0, &buf->bo,
- &buf->offset);
- if (!buf->bo)
- return nvc0_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
- } else
- if (domain == NOUVEAU_BO_GART) {
- buf->mm = nvc0_mm_allocate(screen->mm_GART, buf->base.width0, &buf->bo,
- &buf->offset);
- if (!buf->bo)
- return FALSE;
- }
- if (domain != NOUVEAU_BO_GART) {
- if (!buf->data) {
- buf->data = MALLOC(buf->base.width0);
- if (!buf->data)
- return FALSE;
- }
- }
- buf->domain = domain;
- return TRUE;
-}
-
-static INLINE void
-release_allocation(struct nvc0_mm_allocation **mm, struct nvc0_fence *fence)
-{
- if (fence && fence->state != NVC0_FENCE_STATE_SIGNALLED) {
- nvc0_fence_sched_release(fence, *mm);
- } else {
- nvc0_mm_free(*mm);
- }
- (*mm) = NULL;
-}
-
-INLINE void
-nvc0_buffer_release_gpu_storage(struct nvc0_resource *buf)
-{
- nouveau_bo_ref(NULL, &buf->bo);
-
- if (buf->mm)
- release_allocation(&buf->mm, buf->fence);
-
- buf->domain = 0;
-}
-
-static INLINE boolean
-nvc0_buffer_reallocate(struct nvc0_screen *screen, struct nvc0_resource *buf,
- unsigned domain)
-{
- nvc0_buffer_release_gpu_storage(buf);
-
- return nvc0_buffer_allocate(screen, buf, domain);
-}
-
-static void
-nvc0_buffer_destroy(struct pipe_screen *pscreen,
- struct pipe_resource *presource)
-{
- struct nvc0_resource *res = nvc0_resource(presource);
-
- nvc0_buffer_release_gpu_storage(res);
-
- if (res->data && !(res->status & NVC0_BUFFER_STATUS_USER_MEMORY))
- FREE(res->data);
-
- FREE(res);
-}
-
-/* Maybe just migrate to GART right away if we actually need to do this. */
-boolean
-nvc0_buffer_download(struct nvc0_context *nvc0, struct nvc0_resource *buf,
- unsigned start, unsigned size)
-{
- struct nvc0_mm_allocation *mm;
- struct nouveau_bo *bounce = NULL;
- uint32_t offset;
-
- assert(buf->domain == NOUVEAU_BO_VRAM);
-
- mm = nvc0_mm_allocate(nvc0->screen->mm_GART, size, &bounce, &offset);
- if (!bounce)
- return FALSE;
-
- nvc0_m2mf_copy_linear(nvc0, bounce, offset, NOUVEAU_BO_GART,
- buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
- size);
-
- if (nouveau_bo_map_range(bounce, offset, size, NOUVEAU_BO_RD))
- return FALSE;
- memcpy(buf->data + start, bounce->map, size);
- nouveau_bo_unmap(bounce);
-
- buf->status &= ~NVC0_BUFFER_STATUS_GPU_WRITING;
-
- nouveau_bo_ref(NULL, &bounce);
- if (mm)
- nvc0_mm_free(mm);
- return TRUE;
-}
-
-static boolean
-nvc0_buffer_upload(struct nvc0_context *nvc0, struct nvc0_resource *buf,
- unsigned start, unsigned size)
-{
- struct nvc0_mm_allocation *mm;
- struct nouveau_bo *bounce = NULL;
- uint32_t offset;
-
- if (size <= 192) {
- nvc0_m2mf_push_linear(nvc0, buf->bo, buf->domain, buf->offset + start,
- size, buf->data + start);
- return TRUE;
- }
-
- mm = nvc0_mm_allocate(nvc0->screen->mm_GART, size, &bounce, &offset);
- if (!bounce)
- return FALSE;
-
- nouveau_bo_map_range(bounce, offset, size,
- NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
- memcpy(bounce->map, buf->data + start, size);
- nouveau_bo_unmap(bounce);
-
- nvc0_m2mf_copy_linear(nvc0, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
- bounce, offset, NOUVEAU_BO_GART, size);
-
- nouveau_bo_ref(NULL, &bounce);
- if (mm)
- release_allocation(&mm, nvc0->screen->fence.current);
-
- if (start == 0 && size == buf->base.width0)
- buf->status &= ~NVC0_BUFFER_STATUS_GPU_WRITING;
- return TRUE;
-}
-
-static struct pipe_transfer *
-nvc0_buffer_transfer_get(struct pipe_context *pipe,
- struct pipe_resource *resource,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box)
-{
- struct nvc0_resource *buf = nvc0_resource(resource);
- struct nvc0_transfer *xfr = CALLOC_STRUCT(nvc0_transfer);
- if (!xfr)
- return NULL;
-
- xfr->base.resource = resource;
- xfr->base.box.x = box->x;
- xfr->base.box.width = box->width;
- xfr->base.usage = usage;
-
- if (buf->domain == NOUVEAU_BO_VRAM) {
- if (usage & PIPE_TRANSFER_READ) {
- if (buf->status & NVC0_BUFFER_STATUS_GPU_WRITING)
- nvc0_buffer_download(nvc0_context(pipe), buf, 0, buf->base.width0);
- }
- }
-
- return &xfr->base;
-}
-
-static void
-nvc0_buffer_transfer_destroy(struct pipe_context *pipe,
- struct pipe_transfer *transfer)
-{
- struct nvc0_resource *buf = nvc0_resource(transfer->resource);
- struct nvc0_transfer *xfr = nvc0_transfer(transfer);
-
- if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
- /* writing is penalized more heavily than reading */
- nvc0_buffer_adjust_score(nvc0_context(pipe), buf, -5000);
-
- if (buf->domain == NOUVEAU_BO_VRAM) {
- nvc0_buffer_upload(nvc0_context(pipe), buf,
- transfer->box.x, transfer->box.width);
- }
-
- if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
- PIPE_BIND_INDEX_BUFFER)))
- nvc0_context(pipe)->vbo_dirty = TRUE;
- }
-
- FREE(xfr);
-}
-
-static INLINE boolean
-nvc0_buffer_sync(struct nvc0_resource *buf, unsigned rw)
-{
- if (rw == PIPE_TRANSFER_READ) {
- if (!buf->fence_wr)
- return TRUE;
- if (!nvc0_fence_wait(buf->fence_wr))
- return FALSE;
- } else {
- if (!buf->fence)
- return TRUE;
- if (!nvc0_fence_wait(buf->fence))
- return FALSE;
-
- nvc0_fence_reference(&buf->fence, NULL);
- }
- nvc0_fence_reference(&buf->fence_wr, NULL);
-
- return TRUE;
-}
-
-static INLINE boolean
-nvc0_buffer_busy(struct nvc0_resource *buf, unsigned rw)
-{
- if (rw == PIPE_TRANSFER_READ)
- return (buf->fence_wr && !nvc0_fence_signalled(buf->fence_wr));
- else
- return (buf->fence && !nvc0_fence_signalled(buf->fence));
-}
-
-static void *
-nvc0_buffer_transfer_map(struct pipe_context *pipe,
- struct pipe_transfer *transfer)
-{
- struct nvc0_transfer *xfr = nvc0_transfer(transfer);
- struct nvc0_resource *buf = nvc0_resource(transfer->resource);
- struct nouveau_bo *bo = buf->bo;
- uint8_t *map;
- int ret;
- uint32_t offset = xfr->base.box.x;
- uint32_t flags;
-
- nvc0_buffer_adjust_score(nvc0_context(pipe), buf, -250);
-
- if (buf->domain != NOUVEAU_BO_GART)
- return buf->data + offset;
-
- if (buf->mm)
- flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
- else
- flags = nouveau_screen_transfer_flags(xfr->base.usage);
-
- offset += buf->offset;
-
- ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
- if (ret)
- return NULL;
- map = bo->map;
-
- /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
- * not doing so might make future maps fail or trigger "reloc while mapped"
- * errors. The returned pointer remains valid because, for now, mappings to
- * userspace stay persistent even after nouveau_bo_unmap().
- */
- nouveau_bo_unmap(bo);
-
- if (buf->mm) {
- if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
- if (nvc0_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
- return NULL;
- } else
- if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- nvc0_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
- }
- }
- return map;
-}
-
-
-
-static void
-nvc0_buffer_transfer_flush_region(struct pipe_context *pipe,
- struct pipe_transfer *transfer,
- const struct pipe_box *box)
-{
- struct nvc0_resource *res = nvc0_resource(transfer->resource);
- struct nouveau_bo *bo = res->bo;
- unsigned offset = res->offset + transfer->box.x + box->x;
-
- /* not using non-snoop system memory yet, no need for cflush */
- if (1)
- return;
-
- /* XXX: maybe need to upload for VRAM buffers here */
-
- nouveau_screen_bo_map_flush_range(pipe->screen, bo, offset, box->width);
-}
-
-static void
-nvc0_buffer_transfer_unmap(struct pipe_context *pipe,
- struct pipe_transfer *transfer)
-{
- /* we've called nouveau_bo_unmap right after map */
-}
-
-const struct u_resource_vtbl nvc0_buffer_vtbl =
-{
- u_default_resource_get_handle, /* get_handle */
- nvc0_buffer_destroy, /* resource_destroy */
- NULL, /* is_resource_referenced */
- nvc0_buffer_transfer_get, /* get_transfer */
- nvc0_buffer_transfer_destroy, /* transfer_destroy */
- nvc0_buffer_transfer_map, /* transfer_map */
- nvc0_buffer_transfer_flush_region, /* transfer_flush_region */
- nvc0_buffer_transfer_unmap, /* transfer_unmap */
- u_default_transfer_inline_write /* transfer_inline_write */
-};
-
-struct pipe_resource *
-nvc0_buffer_create(struct pipe_screen *pscreen,
- const struct pipe_resource *templ)
-{
- struct nvc0_screen *screen = nvc0_screen(pscreen);
- struct nvc0_resource *buffer;
- boolean ret;
-
- buffer = CALLOC_STRUCT(nvc0_resource);
- if (!buffer)
- return NULL;
-
- buffer->base = *templ;
- buffer->vtbl = &nvc0_buffer_vtbl;
- pipe_reference_init(&buffer->base.reference, 1);
- buffer->base.screen = pscreen;
-
- if (buffer->base.bind & PIPE_BIND_CONSTANT_BUFFER)
- ret = nvc0_buffer_allocate(screen, buffer, 0);
- else
- ret = nvc0_buffer_allocate(screen, buffer, NOUVEAU_BO_GART);
-
- if (ret == FALSE)
- goto fail;
-
- return &buffer->base;
-
-fail:
- FREE(buffer);
- return NULL;
-}
-
-
-struct pipe_resource *
-nvc0_user_buffer_create(struct pipe_screen *pscreen,
- void *ptr,
- unsigned bytes,
- unsigned bind)
-{
- struct nvc0_resource *buffer;
-
- buffer = CALLOC_STRUCT(nvc0_resource);
- if (!buffer)
- return NULL;
-
- pipe_reference_init(&buffer->base.reference, 1);
- buffer->vtbl = &nvc0_buffer_vtbl;
- buffer->base.screen = pscreen;
- buffer->base.format = PIPE_FORMAT_R8_UNORM;
- buffer->base.usage = PIPE_USAGE_IMMUTABLE;
- buffer->base.bind = bind;
- buffer->base.width0 = bytes;
- buffer->base.height0 = 1;
- buffer->base.depth0 = 1;
-
- buffer->data = ptr;
- buffer->status = NVC0_BUFFER_STATUS_USER_MEMORY;
-
- return &buffer->base;
-}
-
-/* Like download, but for GART buffers. Merge ? */
-static INLINE boolean
-nvc0_buffer_data_fetch(struct nvc0_resource *buf,
- struct nouveau_bo *bo, unsigned offset, unsigned size)
-{
- if (!buf->data) {
- buf->data = MALLOC(size);
- if (!buf->data)
- return FALSE;
- }
- if (nouveau_bo_map_range(bo, offset, size, NOUVEAU_BO_RD))
- return FALSE;
- memcpy(buf->data, bo->map, size);
- nouveau_bo_unmap(bo);
-
- return TRUE;
-}
-
-/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
-boolean
-nvc0_buffer_migrate(struct nvc0_context *nvc0,
- struct nvc0_resource *buf, const unsigned new_domain)
-{
- struct nvc0_screen *screen = nvc0_screen(buf->base.screen);
- struct nouveau_bo *bo;
- const unsigned old_domain = buf->domain;
- unsigned size = buf->base.width0;
- unsigned offset;
- int ret;
-
- assert(new_domain != old_domain);
-
- if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
- if (!nvc0_buffer_allocate(screen, buf, new_domain))
- return FALSE;
- ret = nouveau_bo_map_range(buf->bo, buf->offset, size, NOUVEAU_BO_WR |
- NOUVEAU_BO_NOSYNC);
- if (ret)
- return ret;
- memcpy(buf->bo->map, buf->data, size);
- nouveau_bo_unmap(buf->bo);
- FREE(buf->data);
- } else
- if (old_domain != 0 && new_domain != 0) {
- struct nvc0_mm_allocation *mm = buf->mm;
-
- if (new_domain == NOUVEAU_BO_VRAM) {
- /* keep a system memory copy of our data in case we hit a fallback */
- if (!nvc0_buffer_data_fetch(buf, buf->bo, buf->offset, size))
- return FALSE;
- debug_printf("migrating %u KiB to VRAM\n", size / 1024);
- }
-
- offset = buf->offset;
- bo = buf->bo;
- buf->bo = NULL;
- buf->mm = NULL;
- nvc0_buffer_allocate(screen, buf, new_domain);
-
- nvc0_m2mf_copy_linear(nvc0, buf->bo, buf->offset, new_domain,
- bo, offset, old_domain, buf->base.width0);
-
- nouveau_bo_ref(NULL, &bo);
- if (mm)
- release_allocation(&mm, screen->fence.current);
- } else
- if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
- if (!nvc0_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
- return FALSE;
- if (!nvc0_buffer_upload(nvc0, buf, 0, buf->base.width0))
- return FALSE;
- } else
- return FALSE;
-
- assert(buf->domain == new_domain);
- return TRUE;
-}
-
-/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
- * We'd like to only allocate @size bytes here, but then we'd have to rebase
- * the vertex indices ...
- */
-boolean
-nvc0_user_buffer_upload(struct nvc0_resource *buf, unsigned base, unsigned size)
-{
- struct nvc0_screen *screen = nvc0_screen(buf->base.screen);
- int ret;
-
- assert(buf->status & NVC0_BUFFER_STATUS_USER_MEMORY);
-
- buf->base.width0 = base + size;
- if (!nvc0_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
- return FALSE;
-
- ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
- NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
- if (ret)
- return FALSE;
- memcpy(buf->bo->map, buf->data + base, size);
- nouveau_bo_unmap(buf->bo);
-
- return TRUE;
-}
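
For orientation, a compressed view of the data paths implemented by the buffer code removed above; this is a paraphrase of that code, not new behaviour:

/* upload:   CPU (buf->data) --memcpy--> GART bounce --M2MF copy--> VRAM
 *           (uploads of <= 192 bytes skip the bounce and use an M2MF push)
 * download: VRAM --M2MF copy--> GART bounce --memcpy--> CPU (buf->data)
 * bounce allocations are freed immediately (download) or deferred until the
 * current fence signals (upload), see release_allocation() above. */
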
}
if (fence)
- nvc0_fence_reference((struct nvc0_fence **)fence,
- nvc0->screen->fence.current);
+ nouveau_fence_ref(nvc0->screen->base.fence.current,
+ (struct nouveau_fence **)fence);
if (flags & (PIPE_FLUSH_SWAPBUFFERS | PIPE_FLUSH_FRAME))
FIRE_RING(chan);
{
struct nvc0_context *nvc0 = chan->user_private;
- nvc0_screen_fence_update(nvc0->screen, TRUE);
+ nouveau_fence_update(&nvc0->screen->base, TRUE);
- nvc0_screen_fence_next(nvc0->screen);
+ nouveau_fence_next(&nvc0->screen->base);
}
struct pipe_context *
}
struct resident {
- struct nvc0_resource *res;
+ struct nv04_resource *res;
uint32_t flags;
};
void
nvc0_bufctx_add_resident(struct nvc0_context *nvc0, int ctx,
- struct nvc0_resource *resource, uint32_t flags)
+ struct nv04_resource *resource, uint32_t flags)
{
struct resident rsd = { resource, flags };
void
nvc0_bufctx_del_resident(struct nvc0_context *nvc0, int ctx,
- struct nvc0_resource *resource)
+ struct nv04_resource *resource)
{
struct resident *rsd, *top;
unsigned i;
void nvc0_bufctx_emit_relocs(struct nvc0_context *);
void nvc0_bufctx_add_resident(struct nvc0_context *, int ctx,
- struct nvc0_resource *, uint32_t flags);
+ struct nv04_resource *, uint32_t flags);
void nvc0_bufctx_del_resident(struct nvc0_context *, int ctx,
- struct nvc0_resource *);
+ struct nv04_resource *);
static INLINE void
nvc0_bufctx_reset(struct nvc0_context *nvc0, int ctx)
{
/* nvc0_transfer.c */
void
-nvc0_m2mf_push_linear(struct nvc0_context *nvc0,
- struct nouveau_bo *dst, unsigned domain, int offset,
+nvc0_m2mf_push_linear(struct pipe_context *pipe,
+ struct nouveau_bo *dst, unsigned offset, unsigned domain,
unsigned size, void *data);
void
-nvc0_m2mf_copy_linear(struct nvc0_context *nvc0,
+nvc0_m2mf_copy_linear(struct pipe_context *pipe,
struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
unsigned size);
+++ /dev/null
-/*
- * Copyright 2010 Christoph Bumiller
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
- * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "nvc0_fence.h"
-#include "nvc0_context.h"
-#include "nvc0_screen.h"
-
-#ifdef PIPE_OS_UNIX
-#include <sched.h>
-#endif
-
-boolean
-nvc0_screen_fence_new(struct nvc0_screen *screen, struct nvc0_fence **fence,
- boolean emit)
-{
- *fence = CALLOC_STRUCT(nvc0_fence);
- if (!*fence)
- return FALSE;
-
- (*fence)->screen = screen;
- (*fence)->ref = 1;
-
- if (emit)
- nvc0_fence_emit(*fence);
-
- return TRUE;
-}
-
-void
-nvc0_fence_emit(struct nvc0_fence *fence)
-{
- struct nvc0_screen *screen = fence->screen;
- struct nouveau_channel *chan = screen->base.channel;
-
- fence->sequence = ++screen->fence.sequence;
-
- assert(fence->state == NVC0_FENCE_STATE_AVAILABLE);
-
- MARK_RING (chan, 5, 2);
- BEGIN_RING(chan, RING_3D(QUERY_ADDRESS_HIGH), 4);
- OUT_RELOCh(chan, screen->fence.bo, 0, NOUVEAU_BO_WR);
- OUT_RELOCl(chan, screen->fence.bo, 0, NOUVEAU_BO_WR);
- OUT_RING (chan, fence->sequence);
- OUT_RING (chan, NVC0_3D_QUERY_GET_FENCE | NVC0_3D_QUERY_GET_SHORT |
- (0xf << NVC0_3D_QUERY_GET_UNIT__SHIFT));
-
- ++fence->ref;
-
- if (screen->fence.tail)
- screen->fence.tail->next = fence;
- else
- screen->fence.head = fence;
-
- screen->fence.tail = fence;
-
- fence->state = NVC0_FENCE_STATE_EMITTED;
-}
-
-static void
-nvc0_fence_trigger_release_buffers(struct nvc0_fence *fence);
-
-void
-nvc0_fence_del(struct nvc0_fence *fence)
-{
- struct nvc0_fence *it;
- struct nvc0_screen *screen = fence->screen;
-
- if (fence->state == NVC0_FENCE_STATE_EMITTED ||
- fence->state == NVC0_FENCE_STATE_FLUSHED) {
- if (fence == screen->fence.head) {
- screen->fence.head = fence->next;
- if (!screen->fence.head)
- screen->fence.tail = NULL;
- } else {
- for (it = screen->fence.head; it && it->next != fence; it = it->next);
- it->next = fence->next;
- if (screen->fence.tail == fence)
- screen->fence.tail = it;
- }
- }
-
- if (fence->buffers) {
- debug_printf("WARNING: deleting fence with buffers "
- "still hooked to it !\n");
- nvc0_fence_trigger_release_buffers(fence);
- }
-
- FREE(fence);
-}
-
-static void
-nvc0_fence_trigger_release_buffers(struct nvc0_fence *fence)
-{
- struct nvc0_mm_allocation *alloc = fence->buffers;
-
- while (alloc) {
- struct nvc0_mm_allocation *next = alloc->next;
- nvc0_mm_free(alloc);
- alloc = next;
- };
- fence->buffers = NULL;
-}
-
-void
-nvc0_screen_fence_update(struct nvc0_screen *screen, boolean flushed)
-{
- struct nvc0_fence *fence;
- struct nvc0_fence *next = NULL;
- uint32_t sequence = screen->fence.map[0];
-
- if (screen->fence.sequence_ack == sequence)
- return;
- screen->fence.sequence_ack = sequence;
-
- for (fence = screen->fence.head; fence; fence = next) {
- next = fence->next;
- sequence = fence->sequence;
-
- fence->state = NVC0_FENCE_STATE_SIGNALLED;
-
- if (fence->buffers)
- nvc0_fence_trigger_release_buffers(fence);
-
- nvc0_fence_reference(&fence, NULL);
-
- if (sequence == screen->fence.sequence_ack)
- break;
- }
- screen->fence.head = next;
- if (!next)
- screen->fence.tail = NULL;
-
- if (flushed) {
- for (fence = next; fence; fence = fence->next)
- fence->state = NVC0_FENCE_STATE_FLUSHED;
- }
-}
-
-boolean
-nvc0_fence_signalled(struct nvc0_fence *fence)
-{
- struct nvc0_screen *screen = fence->screen;
-
- if (fence->state >= NVC0_FENCE_STATE_EMITTED)
- nvc0_screen_fence_update(screen, FALSE);
-
- return fence->state == NVC0_FENCE_STATE_SIGNALLED;
-}
-
-#define NVC0_FENCE_MAX_SPINS (1 << 31)
-
-boolean
-nvc0_fence_wait(struct nvc0_fence *fence)
-{
- struct nvc0_screen *screen = fence->screen;
- uint32_t spins = 0;
-
- if (fence->state < NVC0_FENCE_STATE_EMITTED) {
- nvc0_fence_emit(fence);
-
- if (fence == screen->fence.current)
- nvc0_screen_fence_new(screen, &screen->fence.current, FALSE);
- }
- if (fence->state < NVC0_FENCE_STATE_FLUSHED)
- FIRE_RING(screen->base.channel);
-
- do {
- nvc0_screen_fence_update(screen, FALSE);
-
- if (fence->state == NVC0_FENCE_STATE_SIGNALLED)
- return TRUE;
- spins++;
-#ifdef PIPE_OS_UNIX
- if (!(spins % 8)) /* donate a few cycles */
- sched_yield();
-#endif
- } while (spins < NVC0_FENCE_MAX_SPINS);
-
- debug_printf("Wait on fence %u (ack = %u, next = %u) timed out !\n",
- fence->sequence,
- screen->fence.sequence_ack, screen->fence.sequence);
-
- return FALSE;
-}
-
-void
-nvc0_screen_fence_next(struct nvc0_screen *screen)
-{
- nvc0_fence_emit(screen->fence.current);
- nvc0_screen_fence_new(screen, &screen->fence.current, FALSE);
-}
+++ /dev/null
-
-#ifndef __NVC0_FENCE_H__
-#define __NVC0_FENCE_H__
-
-#include "util/u_inlines.h"
-#include "util/u_double_list.h"
-
-#define NVC0_FENCE_STATE_AVAILABLE 0
-#define NVC0_FENCE_STATE_EMITTED 1
-#define NVC0_FENCE_STATE_FLUSHED 2
-#define NVC0_FENCE_STATE_SIGNALLED 3
-
-struct nvc0_mm_allocation;
-
-struct nvc0_fence {
- struct nvc0_fence *next;
- struct nvc0_screen *screen;
- int state;
- int ref;
- uint32_t sequence;
- struct nvc0_mm_allocation *buffers;
-};
-
-void nvc0_fence_emit(struct nvc0_fence *);
-void nvc0_fence_del(struct nvc0_fence *);
-
-boolean nvc0_fence_wait(struct nvc0_fence *);
-boolean nvc0_fence_signalled(struct nvc0_fence *);
-
-static INLINE void
-nvc0_fence_reference(struct nvc0_fence **ref, struct nvc0_fence *fence)
-{
- if (*ref) {
- if (--(*ref)->ref == 0)
- nvc0_fence_del(*ref);
- }
- if (fence)
- ++fence->ref;
-
- *ref = fence;
-}
-
-static INLINE struct nvc0_fence *
-nvc0_fence(struct pipe_fence_handle *fence)
-{
- return (struct nvc0_fence *)fence;
-}
-
-#endif // __NVC0_FENCE_H__
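
Since the patch also flips the argument order of the fence-referencing helper, here is a minimal side-by-side sketch; 'res' stands for any holder of a fence pointer and is illustrative only:

/* old helper (removed above): destination pointer first, fence second */
nvc0_fence_reference(&res->fence, screen->fence.current);

/* shared replacement used by this patch: fence first, destination second */
nouveau_fence_ref(screen->base.fence.current, &res->fence);
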
+++ /dev/null
-
-#include "util/u_inlines.h"
-#include "util/u_memory.h"
-#include "util/u_double_list.h"
-
-#include "nvc0_screen.h"
-
-#define MM_MIN_ORDER 7
-#define MM_MAX_ORDER 20
-
-#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)
-
-#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
-#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
-
-struct mm_bucket {
- struct list_head free;
- struct list_head used;
- struct list_head full;
- int num_free;
-};
-
-struct nvc0_mman {
- struct nouveau_device *dev;
- struct mm_bucket bucket[MM_NUM_BUCKETS];
- uint32_t storage_type;
- uint32_t domain;
- uint64_t allocated;
-};
-
-struct mm_slab {
- struct list_head head;
- struct nouveau_bo *bo;
- struct nvc0_mman *cache;
- int order;
- int count;
- int free;
- uint32_t bits[0];
-};
-
-static int
-mm_slab_alloc(struct mm_slab *slab)
-{
- int i, n, b;
-
- if (slab->free == 0)
- return -1;
-
- for (i = 0; i < (slab->count + 31) / 32; ++i) {
- b = ffs(slab->bits[i]) - 1;
- if (b >= 0) {
- n = i * 32 + b;
- assert(n < slab->count);
- slab->free--;
- slab->bits[i] &= ~(1 << b);
- return n;
- }
- }
- return -1;
-}
-
-static INLINE void
-mm_slab_free(struct mm_slab *slab, int i)
-{
- assert(i < slab->count);
- slab->bits[i / 32] |= 1 << (i % 32);
- slab->free++;
- assert(slab->free <= slab->count);
-}
-
-static INLINE int
-mm_get_order(uint32_t size)
-{
- int s = __builtin_clz(size) ^ 31;
-
- if (size > (1 << s))
- s += 1;
- return s;
-}
-
-static struct mm_bucket *
-mm_bucket_by_order(struct nvc0_mman *cache, int order)
-{
- if (order > MM_MAX_ORDER)
- return NULL;
- return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
-}
-
-static struct mm_bucket *
-mm_bucket_by_size(struct nvc0_mman *cache, unsigned size)
-{
- return mm_bucket_by_order(cache, mm_get_order(size));
-}
-
-/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
-static INLINE uint32_t
-mm_default_slab_size(unsigned chunk_order)
-{
- static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
- {
- 12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
- };
-
- assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);
-
- return 1 << slab_order[chunk_order - MM_MIN_ORDER];
-}
-
-static int
-mm_slab_new(struct nvc0_mman *cache, int chunk_order)
-{
- struct mm_slab *slab;
- int words, ret;
- const uint32_t size = mm_default_slab_size(chunk_order);
-
- words = ((size >> chunk_order) + 31) / 32;
- assert(words);
-
- slab = MALLOC(sizeof(struct mm_slab) + words * 4);
- if (!slab)
- return PIPE_ERROR_OUT_OF_MEMORY;
-
- memset(&slab->bits[0], ~0, words * 4);
-
- slab->bo = NULL;
- ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
- 0, cache->storage_type, &slab->bo);
- if (ret) {
- FREE(slab);
- return PIPE_ERROR_OUT_OF_MEMORY;
- }
-
- LIST_INITHEAD(&slab->head);
-
- slab->cache = cache;
- slab->order = chunk_order;
- slab->count = slab->free = size >> chunk_order;
-
- LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);
-
- cache->allocated += size;
-
- debug_printf("MM: new slab, total memory = %lu KiB\n",
- cache->allocated / 1024);
-
- return PIPE_OK;
-}
-
-/* @return token to identify slab or NULL if we just allocated a new bo */
-struct nvc0_mm_allocation *
-nvc0_mm_allocate(struct nvc0_mman *cache,
- uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
-{
- struct mm_bucket *bucket;
- struct mm_slab *slab;
- struct nvc0_mm_allocation *alloc;
- int ret;
-
- bucket = mm_bucket_by_size(cache, size);
- if (!bucket) {
- ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
- 0, cache->storage_type, bo);
- if (ret)
- debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);
-
- *offset = 0;
- return NULL;
- }
-
- if (!LIST_IS_EMPTY(&bucket->used)) {
- slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
- } else {
- if (LIST_IS_EMPTY(&bucket->free)) {
- mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
- }
- slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
-
- LIST_DEL(&slab->head);
- LIST_ADD(&slab->head, &bucket->used);
- }
-
- *offset = mm_slab_alloc(slab) << slab->order;
-
- alloc = MALLOC_STRUCT(nvc0_mm_allocation);
- if (!alloc)
- return NULL;
-
- nouveau_bo_ref(slab->bo, bo);
-
- if (slab->free == 0) {
- LIST_DEL(&slab->head);
- LIST_ADD(&slab->head, &bucket->full);
- }
-
- alloc->next = NULL;
- alloc->offset = *offset;
- alloc->priv = (void *)slab;
-
- return alloc;
-}
-
-void
-nvc0_mm_free(struct nvc0_mm_allocation *alloc)
-{
- struct mm_slab *slab = (struct mm_slab *)alloc->priv;
- struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);
-
- mm_slab_free(slab, alloc->offset >> slab->order);
-
- if (slab->free == 1) {
- LIST_DEL(&slab->head);
-
- if (slab->count > 1)
- LIST_ADDTAIL(&slab->head, &bucket->used);
- else
- LIST_ADDTAIL(&slab->head, &bucket->free);
- }
-
- FREE(alloc);
-}
-
-struct nvc0_mman *
-nvc0_mm_create(struct nouveau_device *dev, uint32_t domain,
- uint32_t storage_type)
-{
- struct nvc0_mman *cache = MALLOC_STRUCT(nvc0_mman);
- int i;
-
- if (!cache)
- return NULL;
-
- cache->dev = dev;
- cache->domain = domain;
- cache->storage_type = storage_type;
- cache->allocated = 0;
-
- for (i = 0; i < MM_NUM_BUCKETS; ++i) {
- LIST_INITHEAD(&cache->bucket[i].free);
- LIST_INITHEAD(&cache->bucket[i].used);
- LIST_INITHEAD(&cache->bucket[i].full);
- }
-
- return cache;
-}
-
-static INLINE void
-nvc0_mm_free_slabs(struct list_head *head)
-{
- struct mm_slab *slab, *next;
-
- LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
- LIST_DEL(&slab->head);
- nouveau_bo_ref(NULL, &slab->bo);
- FREE(slab);
- }
-}
-
-void
-nvc0_mm_destroy(struct nvc0_mman *cache)
-{
- int i;
-
- for (i = 0; i < MM_NUM_BUCKETS; ++i) {
- if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
- !LIST_IS_EMPTY(&cache->bucket[i].full))
- debug_printf("WARNING: destroying GPU memory cache "
- "with some buffers still in use\n");
-
- nvc0_mm_free_slabs(&cache->bucket[i].free);
- nvc0_mm_free_slabs(&cache->bucket[i].used);
- nvc0_mm_free_slabs(&cache->bucket[i].full);
- }
-}
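
A small standalone check of the order rounding used by the slab allocator removed above; the program below is illustrative only and mirrors mm_get_order():

#include <assert.h>

/* smallest s such that (1 << s) >= size, i.e. size rounded up to a power of two */
static int
order_of(unsigned size)
{
   int s = __builtin_clz(size) ^ 31;   /* floor(log2(size)) */
   if (size > (1u << s))
      s += 1;
   return s;
}

int
main(void)
{
   assert(order_of(128)  == 7);    /* exactly MM_MIN_SIZE -> smallest bucket */
   assert(order_of(129)  == 8);    /* rounds up to the next power of two     */
   assert(order_of(4096) == 12);   /* 4 KiB request -> order 12              */
   return 0;
}
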
-
for (i = 0; i < nvc0->num_vtxbufs; ++i) {
uint8_t *data;
struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];
- struct nvc0_resource *res = nvc0_resource(vb->buffer);
+ struct nv04_resource *res = nv04_resource(vb->buffer);
- data = nvc0_resource_map_offset(nvc0, res,
- vb->buffer_offset, NOUVEAU_BO_RD);
+ data = nouveau_resource_map_offset(&nvc0->pipe, res,
+ vb->buffer_offset, NOUVEAU_BO_RD);
if (apply_bias && likely(!(nvc0->vertex->instance_bufs & (1 << i))))
data += info->index_bias * vb->stride;
}
if (info->indexed) {
- ctx.idxbuf = nvc0_resource_map_offset(nvc0,
- nvc0_resource(nvc0->idxbuf.buffer),
- nvc0->idxbuf.offset, NOUVEAU_BO_RD);
+ ctx.idxbuf = nouveau_resource_map_offset(&nvc0->pipe,
+ nv04_resource(nvc0->idxbuf.buffer),
+ nvc0->idxbuf.offset, NOUVEAU_BO_RD);
if (!ctx.idxbuf)
return;
index_size = nvc0->idxbuf.index_size;
}
if (info->indexed)
- nvc0_resource_unmap(nvc0_resource(nvc0->idxbuf.buffer));
+ nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
for (i = 0; i < nvc0->num_vtxbufs; ++i)
- nvc0_resource_unmap(nvc0_resource(nvc0->vtxbuf[i].buffer));
+ nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));
}
uint32_t offset; /* base + i * 16 */
boolean ready;
boolean is64bit;
- struct nvc0_mm_allocation *mm;
+ struct nouveau_mm_allocation *mm;
};
#define NVC0_QUERY_ALLOC_SPACE 128
nouveau_bo_ref(NULL, &q->bo);
if (q->mm) {
if (q->ready)
- nvc0_mm_free(q->mm);
+ nouveau_mm_free(q->mm);
else
- nvc0_fence_sched_release(screen->fence.current, q->mm);
+ nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, q->mm);
}
}
if (size) {
- q->mm = nvc0_mm_allocate(screen->mm_GART, size, &q->bo, &q->base);
+ q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
if (!q->bo)
return FALSE;
q->offset = q->base;
struct pipe_resource *resource,
unsigned face, int layer)
{
- struct nvc0_resource *res = nvc0_resource(resource);
+ struct nv04_resource *res = nv04_resource(resource);
unsigned flags = 0;
#ifdef NOUVEAU_USERSPACE_MM
{
switch (templ->target) {
case PIPE_BUFFER:
- return nvc0_buffer_create(screen, templ);
+ return nouveau_buffer_create(screen, templ);
default:
return nvc0_miptree_create(screen, templ);
}
pscreen->resource_from_handle = nvc0_resource_from_handle;
pscreen->resource_get_handle = u_resource_get_handle_vtbl;
pscreen->resource_destroy = u_resource_destroy_vtbl;
- pscreen->user_buffer_create = nvc0_user_buffer_create;
+ pscreen->user_buffer_create = nouveau_user_buffer_create;
}
#include "util/u_double_list.h"
#define NOUVEAU_NVC0
#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_fence.h"
+#include "nouveau/nouveau_buffer.h"
#undef NOUVEAU_NVC0
-#include "nvc0_fence.h"
-
-struct pipe_resource;
-struct nouveau_bo;
-struct nvc0_context;
-
-#define NVC0_BUFFER_SCORE_MIN -25000
-#define NVC0_BUFFER_SCORE_MAX 25000
-#define NVC0_BUFFER_SCORE_VRAM_THRESHOLD 20000
-
-/* GPU_WRITING: buffer was (or will be after the next flush) written to by the
- * GPU, and resource->data has not been updated to reflect the modified VRAM
- * contents
- *
- * USER_MEMORY: resource->data is a pointer to client memory and may change
- * between GL calls
- */
-#define NVC0_BUFFER_STATUS_GPU_READING (1 << 0)
-#define NVC0_BUFFER_STATUS_GPU_WRITING (1 << 1)
-#define NVC0_BUFFER_STATUS_USER_MEMORY (1 << 7)
-
-/* Resources, if mapped into the GPU's address space, are guaranteed to
- * have constant virtual addresses.
- * The address of a resource will lie within the nouveau_bo referenced,
- * and this bo should be added to the memory manager's validation list.
- */
-struct nvc0_resource {
- struct pipe_resource base;
- const struct u_resource_vtbl *vtbl;
-
- uint8_t *data;
- struct nouveau_bo *bo;
- uint32_t offset;
-
- uint8_t status;
- uint8_t domain;
-
- int16_t score; /* low if mapped very often; high scores allow migration to VRAM */
-
- struct nvc0_fence *fence;
- struct nvc0_fence *fence_wr;
-
- struct nvc0_mm_allocation *mm;
-};
-
void
-nvc0_buffer_release_gpu_storage(struct nvc0_resource *);
-
-boolean
-nvc0_buffer_download(struct nvc0_context *, struct nvc0_resource *,
- unsigned start, unsigned size);
-
-boolean
-nvc0_buffer_migrate(struct nvc0_context *,
- struct nvc0_resource *, unsigned domain);
-
-static INLINE void
-nvc0_buffer_adjust_score(struct nvc0_context *nvc0, struct nvc0_resource *res,
- int16_t score)
-{
- if (score < 0) {
- if (res->score > NVC0_BUFFER_SCORE_MIN)
- res->score += score;
- } else
- if (score > 0){
- if (res->score < NVC0_BUFFER_SCORE_MAX)
- res->score += score;
- if (res->domain == NOUVEAU_BO_GART &&
- res->score > NVC0_BUFFER_SCORE_VRAM_THRESHOLD)
- nvc0_buffer_migrate(nvc0, res, NOUVEAU_BO_VRAM);
- }
-}
-
-/* XXX: wait for fence (atm only using this for vertex push) */
-static INLINE void *
-nvc0_resource_map_offset(struct nvc0_context *nvc0,
- struct nvc0_resource *res, uint32_t offset,
- uint32_t flags)
-{
- void *map;
-
- nvc0_buffer_adjust_score(nvc0, res, -250);
-
- if ((res->domain == NOUVEAU_BO_VRAM) &&
- (res->status & NVC0_BUFFER_STATUS_GPU_WRITING))
- nvc0_buffer_download(nvc0, res, 0, res->base.width0);
-
- if ((res->domain != NOUVEAU_BO_GART) ||
- (res->status & NVC0_BUFFER_STATUS_USER_MEMORY))
- return res->data + offset;
-
- if (res->mm)
- flags |= NOUVEAU_BO_NOSYNC;
-
- if (nouveau_bo_map_range(res->bo, res->offset + offset,
- res->base.width0, flags))
- return NULL;
-
- map = res->bo->map;
- nouveau_bo_unmap(res->bo);
- return map;
-}
+nvc0_init_resource_functions(struct pipe_context *pcontext);
-static INLINE void
-nvc0_resource_unmap(struct nvc0_resource *res)
-{
- /* no-op */
-}
+void
+nvc0_screen_init_resource_functions(struct pipe_screen *pscreen);
#define NVC0_TILE_DIM_SHIFT(m, d) (((m) >> (d * 4)) & 0xf)
#define NVC0_MAX_TEXTURE_LEVELS 16
struct nvc0_miptree {
- struct nvc0_resource base;
+ struct nv04_resource base;
struct nvc0_miptree_level level[NVC0_MAX_TEXTURE_LEVELS];
uint32_t total_size;
uint32_t layer_stride;
return (struct nvc0_miptree *)pt;
}
-static INLINE struct nvc0_resource *
-nvc0_resource(struct pipe_resource *resource)
-{
- return (struct nvc0_resource *)resource;
-}
-
-/* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
-static INLINE boolean
-nvc0_resource_mapped_by_gpu(struct pipe_resource *resource)
-{
- return nvc0_resource(resource)->domain != 0;
-}
-
-void
-nvc0_init_resource_functions(struct pipe_context *pcontext);
-
-void
-nvc0_screen_init_resource_functions(struct pipe_screen *pscreen);
-
/* Internal functions:
*/
struct pipe_resource *
const struct pipe_resource *template,
struct winsys_handle *whandle);
-struct pipe_resource *
-nvc0_buffer_create(struct pipe_screen *pscreen,
- const struct pipe_resource *templ);
-
-struct pipe_resource *
-nvc0_user_buffer_create(struct pipe_screen *screen,
- void *ptr,
- unsigned bytes,
- unsigned usage);
-
-
struct pipe_surface *
nvc0_miptree_surface_new(struct pipe_context *,
struct pipe_resource *,
void
nvc0_miptree_surface_del(struct pipe_context *, struct pipe_surface *);
-boolean
-nvc0_user_buffer_upload(struct nvc0_resource *, unsigned base, unsigned size);
-
#endif
#include "util/u_format_s3tc.h"
#include "pipe/p_screen.h"
-#include "nvc0_fence.h"
#include "nvc0_context.h"
#include "nvc0_screen.h"
{
struct nvc0_screen *screen = nvc0_screen(pscreen);
- nvc0_fence_wait(screen->fence.current);
- nvc0_fence_reference(&screen->fence.current, NULL);
+ nouveau_fence_wait(screen->base.fence.current);
+ nouveau_fence_ref(NULL, &screen->base.fence.current);
nouveau_bo_ref(NULL, &screen->text);
nouveau_bo_ref(NULL, &screen->tls);
if (screen->tic.entries)
FREE(screen->tic.entries);
- nvc0_mm_destroy(screen->mm_GART);
- nvc0_mm_destroy(screen->mm_VRAM);
- nvc0_mm_destroy(screen->mm_VRAM_fe0);
+ nouveau_mm_destroy(screen->mm_VRAM_fe0);
nouveau_grobj_free(&screen->fermi);
nouveau_grobj_free(&screen->eng2d);
return pos + size;
}
-static void
-nvc0_screen_fence_reference(struct pipe_screen *pscreen,
- struct pipe_fence_handle **ptr,
- struct pipe_fence_handle *fence)
-{
- nvc0_fence_reference((struct nvc0_fence **)ptr, nvc0_fence(fence));
-}
-
-static int
-nvc0_screen_fence_signalled(struct pipe_screen *pscreen,
- struct pipe_fence_handle *fence,
- unsigned flags)
-{
- return !(nvc0_fence_signalled(nvc0_fence(fence)));
-}
-
-static int
-nvc0_screen_fence_finish(struct pipe_screen *pscreen,
- struct pipe_fence_handle *fence,
- unsigned flags)
-{
- return nvc0_fence_wait((struct nvc0_fence *)fence) != TRUE;
-}
-
static void
nvc0_magic_3d_init(struct nouveau_channel *chan)
{
OUT_RING (chan, 0);
}
+static void
+nvc0_screen_fence_emit(struct pipe_screen *pscreen, u32 sequence)
+{
+ struct nvc0_screen *screen = nvc0_screen(pscreen);
+ struct nouveau_channel *chan = screen->base.channel;
+
+ MARK_RING (chan, 5, 2);
+ BEGIN_RING(chan, RING_3D(QUERY_ADDRESS_HIGH), 4);
+ OUT_RELOCh(chan, screen->fence.bo, 0, NOUVEAU_BO_WR);
+ OUT_RELOCl(chan, screen->fence.bo, 0, NOUVEAU_BO_WR);
+ OUT_RING (chan, sequence);
+ OUT_RING (chan, NVC0_3D_QUERY_GET_FENCE | NVC0_3D_QUERY_GET_SHORT |
+ (0xf << NVC0_3D_QUERY_GET_UNIT__SHIFT));
+}
+
+static u32
+nvc0_screen_fence_update(struct pipe_screen *pscreen)
+{
+ struct nvc0_screen *screen = nvc0_screen(pscreen);
+ return screen->fence.map[0];
+}
+
#define FAIL_SCREEN_INIT(str, err) \
do { \
NOUVEAU_ERR(str, err); \
pscreen->get_param = nvc0_screen_get_param;
pscreen->get_shader_param = nvc0_screen_get_shader_param;
pscreen->get_paramf = nvc0_screen_get_paramf;
- pscreen->fence_reference = nvc0_screen_fence_reference;
- pscreen->fence_signalled = nvc0_screen_fence_signalled;
- pscreen->fence_finish = nvc0_screen_fence_finish;
nvc0_screen_init_resource_functions(pscreen);
screen->base.vertex_buffer_flags = NOUVEAU_BO_GART;
screen->base.index_buffer_flags = 0;
+ screen->base.copy_data = nvc0_m2mf_copy_linear;
+ screen->base.push_data = nvc0_m2mf_push_linear;
ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096,
&screen->fence.bo);
nouveau_bo_map(screen->fence.bo, NOUVEAU_BO_RDWR);
screen->fence.map = screen->fence.bo->map;
nouveau_bo_unmap(screen->fence.bo);
+ screen->base.fence.emit = nvc0_screen_fence_emit;
+ screen->base.fence.update = nvc0_screen_fence_update;
for (i = 0; i < NVC0_SCRATCH_NR_BUFFERS; ++i) {
ret = nouveau_bo_new(dev, NOUVEAU_BO_GART, 0, NVC0_SCRATCH_SIZE,
screen->tic.entries = CALLOC(4096, sizeof(void *));
screen->tsc.entries = screen->tic.entries + 2048;
- screen->mm_GART = nvc0_mm_create(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
- 0x000);
- screen->mm_VRAM = nvc0_mm_create(dev, NOUVEAU_BO_VRAM, 0x000);
- screen->mm_VRAM_fe0 = nvc0_mm_create(dev, NOUVEAU_BO_VRAM, 0xfe0);
+ screen->mm_VRAM_fe0 = nouveau_mm_create(dev, NOUVEAU_BO_VRAM, 0xfe0);
- nvc0_screen_fence_new(screen, &screen->fence.current, FALSE);
+ nouveau_fence_new(&screen->base, &screen->base.fence.current, FALSE);
return pscreen;
#define NOUVEAU_NVC0
#include "nouveau/nouveau_screen.h"
+#include "nouveau/nouveau_mm.h"
#undef NOUVEAU_NVC0
#include "nvc0_winsys.h"
#include "nvc0_stateobj.h"
#define NVC0_TIC_MAX_ENTRIES 2048
#define NVC0_TSC_MAX_ENTRIES 2048
-struct nvc0_mman;
struct nvc0_context;
-struct nvc0_fence;
#define NVC0_SCRATCH_SIZE (2 << 20)
#define NVC0_SCRATCH_NR_BUFFERS 2
} tsc;
struct {
- uint32_t *map;
- struct nvc0_fence *head;
- struct nvc0_fence *tail;
- struct nvc0_fence *current;
- uint32_t sequence;
- uint32_t sequence_ack;
struct nouveau_bo *bo;
+ uint32_t *map;
} fence;
- struct nvc0_mman *mm_GART;
- struct nvc0_mman *mm_VRAM;
- struct nvc0_mman *mm_VRAM_fe0;
+ struct nouveau_mman *mm_VRAM_fe0;
struct nouveau_grobj *fermi;
struct nouveau_grobj *eng2d;
return (struct nvc0_screen *)screen;
}
-/* Since a resource can be migrated, we need to decouple allocations from
- * them. This struct is linked with fences for delayed freeing of allocs.
- */
-struct nvc0_mm_allocation {
- struct nvc0_mm_allocation *next;
- void *priv;
- uint32_t offset;
-};
-
-static INLINE void
-nvc0_fence_sched_release(struct nvc0_fence *nf, struct nvc0_mm_allocation *mm)
-{
- mm->next = nf->buffers;
- nf->buffers = mm;
-}
-
-extern struct nvc0_mman *
-nvc0_mm_create(struct nouveau_device *, uint32_t domain, uint32_t storage_type);
-
-extern void
-nvc0_mm_destroy(struct nvc0_mman *);
-
-extern struct nvc0_mm_allocation *
-nvc0_mm_allocate(struct nvc0_mman *,
- uint32_t size, struct nouveau_bo **, uint32_t *offset);
-extern void
-nvc0_mm_free(struct nvc0_mm_allocation *);
-
void nvc0_screen_make_buffers_resident(struct nvc0_screen *);
int nvc0_screen_tic_alloc(struct nvc0_screen *, void *);
int nvc0_screen_tsc_alloc(struct nvc0_screen *, void *);
static INLINE void
-nvc0_resource_fence(struct nvc0_resource *res, uint32_t flags)
+nvc0_resource_fence(struct nv04_resource *res, uint32_t flags)
{
struct nvc0_screen *screen = nvc0_screen(res->base.screen);
if (res->mm) {
- nvc0_fence_reference(&res->fence, screen->fence.current);
+ nouveau_fence_ref(screen->base.fence.current, &res->fence);
if (flags & NOUVEAU_BO_WR)
- nvc0_fence_reference(&res->fence_wr, screen->fence.current);
+ nouveau_fence_ref(screen->base.fence.current, &res->fence_wr);
}
}
static INLINE void
-nvc0_resource_validate(struct nvc0_resource *res, uint32_t flags)
+nvc0_resource_validate(struct nv04_resource *res, uint32_t flags)
{
struct nvc0_screen *screen = nvc0_screen(res->base.screen);
nouveau_bo_validate(screen->base.channel, res->bo, flags);
if (flags & NOUVEAU_BO_WR)
- res->status |= NVC0_BUFFER_STATUS_GPU_WRITING;
+ res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
if (flags & NOUVEAU_BO_RD)
- res->status |= NVC0_BUFFER_STATUS_GPU_READING;
+ res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
nvc0_resource_fence(res, flags);
}
}
-
-boolean
-nvc0_screen_fence_new(struct nvc0_screen *, struct nvc0_fence **, boolean emit);
-void
-nvc0_screen_fence_next(struct nvc0_screen *);
-void
-nvc0_screen_fence_update(struct nvc0_screen *, boolean flushed);
-
-static INLINE boolean
-nvc0_screen_fence_emit(struct nvc0_screen *screen)
-{
- nvc0_fence_emit(screen->fence.current);
-
- return nvc0_screen_fence_new(screen, &screen->fence.current, FALSE);
-}
-
struct nvc0_format {
uint32_t rt;
uint32_t tic;
prog->code_base = prog->res->start;
- nvc0_m2mf_push_linear(nvc0, nvc0->screen->text, NOUVEAU_BO_VRAM,
- prog->code_base, NVC0_SHADER_HEADER_SIZE, prog->hdr);
- nvc0_m2mf_push_linear(nvc0, nvc0->screen->text, NOUVEAU_BO_VRAM,
+ nvc0_m2mf_push_linear(&nvc0->pipe, nvc0->screen->text, prog->code_base,
+ NOUVEAU_BO_VRAM, NVC0_SHADER_HEADER_SIZE, prog->hdr);
+ nvc0_m2mf_push_linear(&nvc0->pipe, nvc0->screen->text,
prog->code_base + NVC0_SHADER_HEADER_SIZE,
- prog->code_size, prog->code);
+ NOUVEAU_BO_VRAM, prog->code_size, prog->code);
BEGIN_RING(nvc0->screen->base.channel, RING_3D(MEM_BARRIER), 1);
OUT_RING (nvc0->screen->base.channel, 0x1111);
for (b = 0; b < nvc0->num_tfbbufs; ++b) {
uint8_t idx, var[128];
int i, n;
- struct nvc0_resource *buf = nvc0_resource(nvc0->tfbbuf[b]);
+ struct nv04_resource *buf = nv04_resource(nvc0->tfbbuf[b]);
BEGIN_RING(chan, RING_3D(TFB_BUFFER_ENABLE(b)), 5);
OUT_RING (chan, 1);
if (nvc0->constbuf[shader][index])
nvc0_bufctx_del_resident(nvc0, NVC0_BUFCTX_CONSTANT,
- nvc0_resource(
- nvc0->constbuf[shader][index]));
+ nv04_resource(nvc0->constbuf[shader][index]));
pipe_resource_reference(&nvc0->constbuf[shader][index], res);
OUT_RING (chan, sf->depth);
OUT_RING (chan, mt->layer_stride >> 2);
- if (mt->base.status & NVC0_BUFFER_STATUS_GPU_READING)
+ if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
serialize = TRUE;
- mt->base.status |= NVC0_BUFFER_STATUS_GPU_WRITING;
- mt->base.status &= ~NVC0_BUFFER_STATUS_GPU_READING;
+ mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+ mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_FRAME, &mt->base,
NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
OUT_RING (chan, sf->height);
OUT_RING (chan, (unk << 16) | sf->depth);
- if (mt->base.status & NVC0_BUFFER_STATUS_GPU_READING)
+ if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
serialize = TRUE;
- mt->base.status |= NVC0_BUFFER_STATUS_GPU_WRITING;
- mt->base.status &= ~NVC0_BUFFER_STATUS_GPU_READING;
+ mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+ mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_FRAME, &mt->base,
NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
unsigned s;
for (s = 0; s < 5; ++s) {
- struct nvc0_resource *res;
+ struct nv04_resource *res;
int i;
while (nvc0->constbuf_dirty[s]) {
i = ffs(nvc0->constbuf_dirty[s]) - 1;
nvc0->constbuf_dirty[s] &= ~(1 << i);
- res = nvc0_resource(nvc0->constbuf[s][i]);
+ res = nv04_resource(nvc0->constbuf[s][i]);
if (!res) {
BEGIN_RING(chan, RING_3D(CB_BIND(s)), 1);
OUT_RING (chan, (i << 4) | 0);
continue;
}
- if (!nvc0_resource_mapped_by_gpu(&res->base)) {
+ if (!nouveau_resource_mapped_by_gpu(&res->base)) {
if (i == 0) {
base = s << 16;
bo = nvc0->screen->uniforms;
void
nvc0_init_surface_functions(struct nvc0_context *nvc0)
{
- nvc0->pipe.resource_copy_region = nvc0_resource_copy_region;
- nvc0->pipe.clear_render_target = nvc0_clear_render_target;
- nvc0->pipe.clear_depth_stencil = nvc0_clear_depth_stencil;
+ nvc0->pipe.resource_copy_region = nvc0_resource_copy_region;
+ nvc0->pipe.clear_render_target = nvc0_clear_render_target;
+ nvc0->pipe.clear_depth_stencil = nvc0_clear_depth_stencil;
}
for (i = 0; i < nvc0->num_textures[s]; ++i) {
struct nvc0_tic_entry *tic = nvc0_tic_entry(nvc0->textures[s][i]);
- struct nvc0_resource *res;
+ struct nv04_resource *res;
if (!tic) {
BEGIN_RING(chan, RING_3D(BIND_TIC(s)), 1);
need_flush = TRUE;
} else
- if (res->status & NVC0_BUFFER_STATUS_GPU_WRITING) {
+ if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
BEGIN_RING(chan, RING_3D(TEX_CACHE_CTL), 1);
OUT_RING (chan, (tic->id << 4) | 1);
}
nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);
- res->status &= ~NVC0_BUFFER_STATUS_GPU_WRITING;
- res->status |= NVC0_BUFFER_STATUS_GPU_READING;
+ res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+ res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_TEXTURES, res,
NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
if (tsc->id < 0) {
tsc->id = nvc0_screen_tsc_alloc(nvc0->screen, tsc);
- nvc0_m2mf_push_linear(nvc0, nvc0->screen->txc, NOUVEAU_BO_VRAM,
- 65536 + tsc->id * 32, 32, tsc->tsc);
+ nvc0_m2mf_push_linear(&nvc0->pipe, nvc0->screen->txc,
+ 65536 + tsc->id * 32, NOUVEAU_BO_VRAM,
+ 32, tsc->tsc);
need_flush = TRUE;
}
nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);
}
void
-nvc0_m2mf_push_linear(struct nvc0_context *nvc0,
- struct nouveau_bo *dst, unsigned domain, int offset,
+nvc0_m2mf_push_linear(struct pipe_context *pipe,
+ struct nouveau_bo *dst, unsigned offset, unsigned domain,
unsigned size, void *data)
{
+ struct nvc0_context *nvc0 = nvc0_context(pipe);
struct nouveau_channel *chan = nvc0->screen->base.channel;
uint32_t *src = (uint32_t *)data;
unsigned count = (size + 3) / 4;
}
void
-nvc0_m2mf_copy_linear(struct nvc0_context *nvc0,
+nvc0_m2mf_copy_linear(struct pipe_context *pipe,
struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
unsigned size)
{
+ struct nvc0_context *nvc0 = nvc0_context(pipe);
struct nouveau_channel *chan = nvc0->screen->base.channel;
while (size) {
{
const void *data;
struct nouveau_channel *chan = nvc0->screen->base.channel;
- struct nvc0_resource *res = nvc0_resource(vb->buffer);
+ struct nv04_resource *res = nv04_resource(vb->buffer);
float v[4];
int i;
const unsigned nc = util_format_get_nr_components(ve->src_format);
- data = nvc0_resource_map_offset(nvc0, res, vb->buffer_offset +
- ve->src_offset, NOUVEAU_BO_RD);
+ data = nouveau_resource_map_offset(&nvc0->pipe, res, vb->buffer_offset +
+ ve->src_offset, NOUVEAU_BO_RD);
util_format_read_4f(ve->src_format, v, 0, data, 0, 0, 0, 1, 1);
nvc0_prevalidate_vbufs(struct nvc0_context *nvc0)
{
struct pipe_vertex_buffer *vb;
- struct nvc0_resource *buf;
+ struct nv04_resource *buf;
int i;
uint32_t base, size;
vb = &nvc0->vtxbuf[i];
if (!vb->stride)
continue;
- buf = nvc0_resource(vb->buffer);
+ buf = nv04_resource(vb->buffer);
/* NOTE: user buffers with temporary storage count as mapped by GPU */
- if (!nvc0_resource_mapped_by_gpu(vb->buffer)) {
+ if (!nouveau_resource_mapped_by_gpu(vb->buffer)) {
if (nvc0->vbo_push_hint) {
nvc0->vbo_fifo = ~0;
continue;
} else {
- if (buf->status & NVC0_BUFFER_STATUS_USER_MEMORY) {
+ if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
nvc0->vbo_user |= 1 << i;
assert(vb->stride > vb->buffer_offset);
nvc0_vbuf_range(nvc0, i, &base, &size);
- nvc0_user_buffer_upload(buf, base, size);
+ nouveau_user_buffer_upload(buf, base, size);
} else {
- nvc0_buffer_migrate(nvc0, buf, NOUVEAU_BO_GART);
+ nouveau_buffer_migrate(&nvc0->pipe, buf, NOUVEAU_BO_GART);
}
nvc0->vbo_dirty = TRUE;
}
}
nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_VERTEX, buf, NOUVEAU_BO_RD);
- nvc0_buffer_adjust_score(nvc0, buf, 1);
+ nouveau_buffer_adjust_score(&nvc0->pipe, buf, 1);
}
}
struct pipe_vertex_element *ve = &nvc0->vertex->element[i].pipe;
const int b = ve->vertex_buffer_index;
struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[b];
- struct nvc0_resource *buf = nvc0_resource(vb->buffer);
+ struct nv04_resource *buf = nv04_resource(vb->buffer);
if (!(nvc0->vbo_user & (1 << b)))
continue;
if (!(written & (1 << b))) {
written |= 1 << b;
- nvc0_user_buffer_upload(buf, base, size);
+ nouveau_user_buffer_upload(buf, base, size);
}
offset = vb->buffer_offset + ve->src_offset;
int i = ffs(vbo_user) - 1;
vbo_user &= ~(1 << i);
- nvc0_buffer_release_gpu_storage(nvc0_resource(nvc0->vtxbuf[i].buffer));
+ nouveau_buffer_release_gpu_storage(nv04_resource(nvc0->vtxbuf[i].buffer));
}
}
}
for (i = 0; i < vertex->num_elements; ++i) {
- struct nvc0_resource *res;
+ struct nv04_resource *res;
unsigned size, offset;
ve = &vertex->element[i];
IMMED_RING(chan, RING_3D(VERTEX_ARRAY_PER_INSTANCE(i)), 0);
}
- res = nvc0_resource(vb->buffer);
+ res = nv04_resource(vb->buffer);
if (nvc0->vbo_fifo || unlikely(vb->stride == 0)) {
if (!nvc0->vbo_fifo)
{
struct nvc0_context *nvc0 = chan->user_private;
- nvc0_screen_fence_update(nvc0->screen, TRUE);
+ nouveau_fence_update(&nvc0->screen->base, TRUE);
nvc0_bufctx_emit_relocs(nvc0);
}
nvc0->state.index_bias = index_bias;
}
- if (nvc0_resource_mapped_by_gpu(nvc0->idxbuf.buffer)) {
- struct nvc0_resource *res = nvc0_resource(nvc0->idxbuf.buffer);
+ if (nouveau_resource_mapped_by_gpu(nvc0->idxbuf.buffer)) {
+ struct nv04_resource *res = nv04_resource(nvc0->idxbuf.buffer);
unsigned offset = nvc0->idxbuf.offset;
unsigned limit = nvc0->idxbuf.buffer->width0 - 1;
- nvc0_buffer_adjust_score(nvc0, res, 1);
+ nouveau_buffer_adjust_score(&nvc0->pipe, res, 1);
while (instance_count--) {
MARK_RING (chan, 11, 4);
mode |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
}
} else {
- data = nvc0_resource_map_offset(nvc0, nvc0_resource(nvc0->idxbuf.buffer),
- nvc0->idxbuf.offset, NOUVEAU_BO_RD);
+ data = nouveau_resource_map_offset(&nvc0->pipe,
+ nv04_resource(nvc0->idxbuf.buffer),
+ nvc0->idxbuf.offset, NOUVEAU_BO_RD);
if (!data)
return;
}
static INLINE int
-OUT_RESRCh(struct nouveau_channel *chan, struct nvc0_resource *res,
+OUT_RESRCh(struct nouveau_channel *chan, struct nv04_resource *res,
unsigned delta, unsigned flags)
{
return OUT_RELOCh(chan, res->bo, res->offset + delta, res->domain | flags);
}
static INLINE int
-OUT_RESRCl(struct nouveau_channel *chan, struct nvc0_resource *res,
+OUT_RESRCl(struct nouveau_channel *chan, struct nv04_resource *res,
unsigned delta, unsigned flags)
{
if (flags & NOUVEAU_BO_WR)
- res->status |= NVC0_BUFFER_STATUS_GPU_WRITING;
+ res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
return OUT_RELOCl(chan, res->bo, res->offset + delta, res->domain | flags);
}
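
Finally, a hedged sketch of how the retyped OUT_RESRCh/OUT_RESRCl pair is meant to be used: it emits a 64-bit buffer address with relocations, mirroring the OUT_RELOCh/OUT_RELOCl pair in the fence code above. The surrounding method and variables are placeholders, not part of this patch:

MARK_RING (chan, 3, 2);                         /* 3 dwords, 2 relocations */
BEGIN_RING(chan, RING_3D(QUERY_ADDRESS_HIGH), 2);
OUT_RESRCh(chan, res, offset, NOUVEAU_BO_WR);   /* upper 32 bits + reloc */
OUT_RESRCl(chan, res, offset, NOUVEAU_BO_WR);   /* lower 32 bits + reloc; the
                                                   WR flag also marks the buffer
                                                   NOUVEAU_BUFFER_STATUS_GPU_WRITING */
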