-I$(TOP)/src/gallium/drivers/nouveau/include
C_SOURCES = nouveau_screen.c \
- nouveau_fence.c
+ nouveau_fence.c \
+ nouveau_mm.c
include ../../Makefile.template
--- /dev/null
+
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+
+#include "nouveau_screen.h"
+#include "nouveau_mm.h"
+
+#include "nouveau/nouveau_bo.h"
+
+#define MM_MIN_ORDER 7
+#define MM_MAX_ORDER 20
+
+#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)
+
+#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
+#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
+
+struct mm_bucket {
+ struct list_head free;
+ struct list_head used;
+ struct list_head full;
+ int num_free;
+};
+
+struct nouveau_mman {
+ struct nouveau_device *dev;
+ struct mm_bucket bucket[MM_NUM_BUCKETS];
+ uint32_t storage_type;
+ uint32_t domain;
+ uint64_t allocated;
+};
+
+struct mm_slab {
+ struct list_head head;
+ struct nouveau_bo *bo;
+ struct nouveau_mman *cache;
+ int order;
+ int count;
+ int free;
+ uint32_t bits[0];
+};
+
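+/* One bit per chunk: 1 = free, 0 = in use.  The map is set to all ones in
+ * mm_slab_new() and allocation clears the first set bit found with ffs().
+ */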
+static int
+mm_slab_alloc(struct mm_slab *slab)
+{
+ int i, n, b;
+
+ if (slab->free == 0)
+ return -1;
+
+ for (i = 0; i < (slab->count + 31) / 32; ++i) {
+ b = ffs(slab->bits[i]) - 1;
+ if (b >= 0) {
+ n = i * 32 + b;
+ assert(n < slab->count);
+ slab->free--;
+ slab->bits[i] &= ~(1 << b);
+ return n;
+ }
+ }
+ return -1;
+}
+
+static INLINE void
+mm_slab_free(struct mm_slab *slab, int i)
+{
+ assert(i < slab->count);
+ slab->bits[i / 32] |= 1 << (i % 32);
+ slab->free++;
+ assert(slab->free <= slab->count);
+}
+
+static INLINE int
+mm_get_order(uint32_t size)
+{
+ int s = __builtin_clz(size) ^ 31;
+
+ if (size > (1 << s))
+ s += 1;
+ return s;
+}
+
+static struct mm_bucket *
+mm_bucket_by_order(struct nouveau_mman *cache, int order)
+{
+ if (order > MM_MAX_ORDER)
+ return NULL;
+ return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
+}
+
+static struct mm_bucket *
+mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
+{
+ return mm_bucket_by_order(cache, mm_get_order(size));
+}
+
+/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
+static INLINE uint32_t
+mm_default_slab_size(unsigned chunk_order)
+{
+ static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
+ {
+ 12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
+ };
+
+ assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);
+
+ return 1 << slab_order[chunk_order - MM_MIN_ORDER];
+}
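+
+/* Worked example: a 4000 byte request has mm_get_order(4000) == 12 (rounded
+ * up to the next power of two, 4096) and thus lands in bucket
+ * 12 - MM_MIN_ORDER == 5.  mm_default_slab_size(12) is 1 << 17 == 128 KiB,
+ * so a fresh slab for that bucket holds 32 chunks of 4 KiB, tracked by a
+ * single word of the bitmap.
+ */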
+
+static int
+mm_slab_new(struct nouveau_mman *cache, int chunk_order)
+{
+ struct mm_slab *slab;
+ int words, ret;
+ const uint32_t size = mm_default_slab_size(chunk_order);
+
+ words = ((size >> chunk_order) + 31) / 32;
+ assert(words);
+
+ slab = MALLOC(sizeof(struct mm_slab) + words * 4);
+ if (!slab)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memset(&slab->bits[0], ~0, words * 4);
+
+ slab->bo = NULL;
+ ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
+ 0, cache->storage_type, &slab->bo);
+ if (ret) {
+ FREE(slab);
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ LIST_INITHEAD(&slab->head);
+
+ slab->cache = cache;
+ slab->order = chunk_order;
+ slab->count = slab->free = size >> chunk_order;
+
+ LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);
+
+ cache->allocated += size;
+
+ debug_printf("MM: new slab, total memory = %lu KiB\n",
+ cache->allocated / 1024);
+
+ return PIPE_OK;
+}
+
+/* @return a token identifying the sub-allocation, or NULL if the request
+ * bypassed the slab cache and a dedicated bo was allocated instead */
+struct nouveau_mm_allocation *
+nouveau_mm_allocate(struct nouveau_mman *cache,
+ uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
+{
+ struct mm_bucket *bucket;
+ struct mm_slab *slab;
+ struct nouveau_mm_allocation *alloc;
+ int ret;
+
+ bucket = mm_bucket_by_size(cache, size);
+ if (!bucket) {
+ ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
+ 0, cache->storage_type, bo);
+ if (ret)
+ debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);
+
+ *offset = 0;
+ return NULL;
+ }
+
+ if (!LIST_IS_EMPTY(&bucket->used)) {
+ slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
+ } else {
+ if (LIST_IS_EMPTY(&bucket->free)) {
+ if (mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER)) != PIPE_OK)
+ return NULL;
+ }
+ slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
+
+ LIST_DEL(&slab->head);
+ LIST_ADD(&slab->head, &bucket->used);
+ }
+
+ *offset = mm_slab_alloc(slab) << slab->order;
+
+ alloc = MALLOC_STRUCT(nouveau_mm_allocation);
+ if (!alloc)
+ return NULL;
+
+ nouveau_bo_ref(slab->bo, bo);
+
+ if (slab->free == 0) {
+ LIST_DEL(&slab->head);
+ LIST_ADD(&slab->head, &bucket->full);
+ }
+
+ alloc->next = NULL;
+ alloc->offset = *offset;
+ alloc->priv = (void *)slab;
+
+ return alloc;
+}
+
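+/* Return a chunk to its slab.  Freeing the first chunk of a previously full
+ * slab moves the slab back to the bucket's used list, or to the free list
+ * if the slab holds only a single chunk and is now entirely unused.
+ */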
+void
+nouveau_mm_free(struct nouveau_mm_allocation *alloc)
+{
+ struct mm_slab *slab = (struct mm_slab *)alloc->priv;
+ struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);
+
+ mm_slab_free(slab, alloc->offset >> slab->order);
+
+ if (slab->free == 1) {
+ LIST_DEL(&slab->head);
+
+ if (slab->count > 1)
+ LIST_ADDTAIL(&slab->head, &bucket->used);
+ else
+ LIST_ADDTAIL(&slab->head, &bucket->free);
+ }
+
+ FREE(alloc);
+}
+
+struct nouveau_mman *
+nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
+ uint32_t storage_type)
+{
+ struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
+ int i;
+
+ if (!cache)
+ return NULL;
+
+ cache->dev = dev;
+ cache->domain = domain;
+ cache->storage_type = storage_type;
+ cache->allocated = 0;
+
+ for (i = 0; i < MM_NUM_BUCKETS; ++i) {
+ LIST_INITHEAD(&cache->bucket[i].free);
+ LIST_INITHEAD(&cache->bucket[i].used);
+ LIST_INITHEAD(&cache->bucket[i].full);
+ }
+
+ return cache;
+}
+
+static INLINE void
+nouveau_mm_free_slabs(struct list_head *head)
+{
+ struct mm_slab *slab, *next;
+
+ LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
+ LIST_DEL(&slab->head);
+ nouveau_bo_ref(NULL, &slab->bo);
+ FREE(slab);
+ }
+}
+
+void
+nouveau_mm_destroy(struct nouveau_mman *cache)
+{
+ int i;
+
+ if (!cache)
+ return;
+
+ for (i = 0; i < MM_NUM_BUCKETS; ++i) {
+ if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
+ !LIST_IS_EMPTY(&cache->bucket[i].full))
+ debug_printf("WARNING: destroying GPU memory cache "
+ "with some buffers still in use\n");
+
+ nouveau_mm_free_slabs(&cache->bucket[i].free);
+ nouveau_mm_free_slabs(&cache->bucket[i].used);
+ nouveau_mm_free_slabs(&cache->bucket[i].full);
+ }
+
+ FREE(cache);
+}
+
--- /dev/null
+#ifndef __NOUVEAU_MM_H__
+#define __NOUVEAU_MM_H__
+
+struct nouveau_mman;
+
+/* Since a resource can be migrated, we need to decouple allocations from
+ * the resources that use them.  These structs are chained onto fences so
+ * that an allocation is only freed once the GPU has finished with it.
+ */
+struct nouveau_mm_allocation {
+ struct nouveau_mm_allocation *next;
+ void *priv;
+ uint32_t offset;
+};
+
+extern struct nouveau_mman *
+nouveau_mm_create(struct nouveau_device *, uint32_t domain,
+ uint32_t storage_type);
+
+extern void
+nouveau_mm_destroy(struct nouveau_mman *);
+
+extern struct nouveau_mm_allocation *
+nouveau_mm_allocate(struct nouveau_mman *, uint32_t size,
+ struct nouveau_bo **, uint32_t *offset);
+
+extern void
+nouveau_mm_free(struct nouveau_mm_allocation *);
+
+#endif // __NOUVEAU_MM_H__
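For reference, a minimal sketch of the intended call pattern, modeled on the
nv50 call sites updated below (screen->mm_GART, the fence, and the boolean
return convention are assumptions taken from that context):

static boolean
example_gart_suballoc(struct nv50_screen *screen, uint32_t size,
                      struct nouveau_fence *fence)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bo = NULL;
   uint32_t offset;

   /* NULL here either means the size bypassed the slab cache (dedicated bo)
    * or the allocation failed; !bo distinguishes the failure case. */
   mm = nouveau_mm_allocate(screen->mm_GART, size, &bo, &offset);
   if (!bo)
      return FALSE;

   /* ... emit GPU work that accesses bo at offset ... */

   /* Delay releasing the sub-allocation until the fence signals. */
   if (mm)
      nouveau_fence_work(fence, nouveau_mm_free, mm);
   nouveau_bo_ref(NULL, &bo);
   return TRUE;
}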
nv50_pc_optimize.c \
nv50_pc_regalloc.c \
nv50_push.c \
- nv50_mm.c \
nv50_query.c
LIBRARY_INCLUDES = \
#define NOUVEAU_NVC0
#include "nouveau/nouveau_screen.h"
#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_mm.h"
#undef NOUVEAU_NVC0
#include "nv50_context.h"
unsigned domain)
{
if (domain == NOUVEAU_BO_VRAM) {
- buf->mm = nv50_mm_allocate(screen->mm_VRAM, buf->base.width0, &buf->bo,
- &buf->offset);
+ buf->mm = nouveau_mm_allocate(screen->mm_VRAM, buf->base.width0, &buf->bo,
+ &buf->offset);
if (!buf->bo)
return nv50_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
} else
if (domain == NOUVEAU_BO_GART) {
- buf->mm = nv50_mm_allocate(screen->mm_GART, buf->base.width0, &buf->bo,
- &buf->offset);
+ buf->mm = nouveau_mm_allocate(screen->mm_GART, buf->base.width0, &buf->bo,
+ &buf->offset);
if (!buf->bo)
return FALSE;
}
}
static INLINE void
-release_allocation(struct nv50_mm_allocation **mm, struct nouveau_fence *fence)
+release_allocation(struct nouveau_mm_allocation **mm, struct nouveau_fence *fence)
{
- nouveau_fence_work(fence, nv50_mm_free, *mm);
+ nouveau_fence_work(fence, nouveau_mm_free, *mm);
(*mm) = NULL;
}
nv50_buffer_download(struct nv50_context *nv50, struct nv50_resource *buf,
unsigned start, unsigned size)
{
- struct nv50_mm_allocation *mm;
+ struct nouveau_mm_allocation *mm;
struct nouveau_bo *bounce = NULL;
uint32_t offset;
assert(buf->domain == NOUVEAU_BO_VRAM);
- mm = nv50_mm_allocate(nv50->screen->mm_GART, size, &bounce, &offset);
+ mm = nouveau_mm_allocate(nv50->screen->mm_GART, size, &bounce, &offset);
if (!bounce)
return FALSE;
nouveau_bo_ref(NULL, &bounce);
if (mm)
- nv50_mm_free(mm);
+ nouveau_mm_free(mm);
return TRUE;
}
nv50_buffer_upload(struct nv50_context *nv50, struct nv50_resource *buf,
unsigned start, unsigned size)
{
- struct nv50_mm_allocation *mm;
+ struct nouveau_mm_allocation *mm;
struct nouveau_bo *bounce = NULL;
uint32_t offset;
return TRUE;
}
- mm = nv50_mm_allocate(nv50->screen->mm_GART, size, &bounce, &offset);
+ mm = nouveau_mm_allocate(nv50->screen->mm_GART, size, &bounce, &offset);
if (!bounce)
return FALSE;
FREE(buf->data);
} else
if (old_domain != 0 && new_domain != 0) {
- struct nv50_mm_allocation *mm = buf->mm;
+ struct nouveau_mm_allocation *mm = buf->mm;
if (new_domain == NOUVEAU_BO_VRAM) {
/* keep a system memory copy of our data in case we hit a fallback */
+++ /dev/null
-
-#include "util/u_inlines.h"
-#include "util/u_memory.h"
-#include "util/u_double_list.h"
-
-#include "nv50_screen.h"
-
-#define MM_MIN_ORDER 7
-#define MM_MAX_ORDER 20
-
-#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)
-
-#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
-#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
-
-struct mm_bucket {
- struct list_head free;
- struct list_head used;
- struct list_head full;
- int num_free;
-};
-
-struct nv50_mman {
- struct nouveau_device *dev;
- struct mm_bucket bucket[MM_NUM_BUCKETS];
- uint32_t storage_type;
- uint32_t domain;
- uint64_t allocated;
-};
-
-struct mm_slab {
- struct list_head head;
- struct nouveau_bo *bo;
- struct nv50_mman *cache;
- int order;
- int count;
- int free;
- uint32_t bits[0];
-};
-
-static int
-mm_slab_alloc(struct mm_slab *slab)
-{
- int i, n, b;
-
- if (slab->free == 0)
- return -1;
-
- for (i = 0; i < (slab->count + 31) / 32; ++i) {
- b = ffs(slab->bits[i]) - 1;
- if (b >= 0) {
- n = i * 32 + b;
- assert(n < slab->count);
- slab->free--;
- slab->bits[i] &= ~(1 << b);
- return n;
- }
- }
- return -1;
-}
-
-static INLINE void
-mm_slab_free(struct mm_slab *slab, int i)
-{
- assert(i < slab->count);
- slab->bits[i / 32] |= 1 << (i % 32);
- slab->free++;
- assert(slab->free <= slab->count);
-}
-
-static INLINE int
-mm_get_order(uint32_t size)
-{
- int s = __builtin_clz(size) ^ 31;
-
- if (size > (1 << s))
- s += 1;
- return s;
-}
-
-static struct mm_bucket *
-mm_bucket_by_order(struct nv50_mman *cache, int order)
-{
- if (order > MM_MAX_ORDER)
- return NULL;
- return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
-}
-
-static struct mm_bucket *
-mm_bucket_by_size(struct nv50_mman *cache, unsigned size)
-{
- return mm_bucket_by_order(cache, mm_get_order(size));
-}
-
-/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
-static INLINE uint32_t
-mm_default_slab_size(unsigned chunk_order)
-{
- static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
- {
- 12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
- };
-
- assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);
-
- return 1 << slab_order[chunk_order - MM_MIN_ORDER];
-}
-
-static int
-mm_slab_new(struct nv50_mman *cache, int chunk_order)
-{
- struct mm_slab *slab;
- int words, ret;
- const uint32_t size = mm_default_slab_size(chunk_order);
-
- words = ((size >> chunk_order) + 31) / 32;
- assert(words);
-
- slab = MALLOC(sizeof(struct mm_slab) + words * 4);
- if (!slab)
- return PIPE_ERROR_OUT_OF_MEMORY;
-
- memset(&slab->bits[0], ~0, words * 4);
-
- slab->bo = NULL;
- ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
- 0, cache->storage_type, &slab->bo);
- if (ret) {
- FREE(slab);
- return PIPE_ERROR_OUT_OF_MEMORY;
- }
-
- LIST_INITHEAD(&slab->head);
-
- slab->cache = cache;
- slab->order = chunk_order;
- slab->count = slab->free = size >> chunk_order;
-
- LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);
-
- cache->allocated += size;
-
- debug_printf("MM: new slab, total memory = %lu KiB\n",
- cache->allocated / 1024);
-
- return PIPE_OK;
-}
-
-/* @return token to identify slab or NULL if we just allocated a new bo */
-struct nv50_mm_allocation *
-nv50_mm_allocate(struct nv50_mman *cache,
- uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
-{
- struct mm_bucket *bucket;
- struct mm_slab *slab;
- struct nv50_mm_allocation *alloc;
- int ret;
-
- bucket = mm_bucket_by_size(cache, size);
- if (!bucket) {
- ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
- 0, cache->storage_type, bo);
- if (ret)
- debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);
-
- *offset = 0;
- return NULL;
- }
-
- if (!LIST_IS_EMPTY(&bucket->used)) {
- slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
- } else {
- if (LIST_IS_EMPTY(&bucket->free)) {
- mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
- }
- slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
-
- LIST_DEL(&slab->head);
- LIST_ADD(&slab->head, &bucket->used);
- }
-
- *offset = mm_slab_alloc(slab) << slab->order;
-
- alloc = MALLOC_STRUCT(nv50_mm_allocation);
- if (!alloc)
- return NULL;
-
- nouveau_bo_ref(slab->bo, bo);
-
- if (slab->free == 0) {
- LIST_DEL(&slab->head);
- LIST_ADD(&slab->head, &bucket->full);
- }
-
- alloc->next = NULL;
- alloc->offset = *offset;
- alloc->priv = (void *)slab;
-
- return alloc;
-}
-
-void
-nv50_mm_free(struct nv50_mm_allocation *alloc)
-{
- struct mm_slab *slab = (struct mm_slab *)alloc->priv;
- struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);
-
- mm_slab_free(slab, alloc->offset >> slab->order);
-
- if (slab->free == 1) {
- LIST_DEL(&slab->head);
-
- if (slab->count > 1)
- LIST_ADDTAIL(&slab->head, &bucket->used);
- else
- LIST_ADDTAIL(&slab->head, &bucket->free);
- }
-
- FREE(alloc);
-}
-
-struct nv50_mman *
-nv50_mm_create(struct nouveau_device *dev, uint32_t domain,
- uint32_t storage_type)
-{
- struct nv50_mman *cache = MALLOC_STRUCT(nv50_mman);
- int i;
-
- if (!cache)
- return NULL;
-
- cache->dev = dev;
- cache->domain = domain;
- cache->storage_type = storage_type;
- cache->allocated = 0;
-
- for (i = 0; i < MM_NUM_BUCKETS; ++i) {
- LIST_INITHEAD(&cache->bucket[i].free);
- LIST_INITHEAD(&cache->bucket[i].used);
- LIST_INITHEAD(&cache->bucket[i].full);
- }
-
- return cache;
-}
-
-static INLINE void
-nv50_mm_free_slabs(struct list_head *head)
-{
- struct mm_slab *slab, *next;
-
- LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
- LIST_DEL(&slab->head);
- nouveau_bo_ref(NULL, &slab->bo);
- FREE(slab);
- }
-}
-
-void
-nv50_mm_destroy(struct nv50_mman *cache)
-{
- int i;
-
- if (!cache)
- return;
-
- for (i = 0; i < MM_NUM_BUCKETS; ++i) {
- if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
- !LIST_IS_EMPTY(&cache->bucket[i].full))
- debug_printf("WARNING: destroying GPU memory cache "
- "with some buffers still in use\n");
-
- nv50_mm_free_slabs(&cache->bucket[i].free);
- nv50_mm_free_slabs(&cache->bucket[i].used);
- nv50_mm_free_slabs(&cache->bucket[i].full);
- }
-}
-
uint32_t offset; /* base + i * 16 */
boolean ready;
boolean is64bit;
- struct nv50_mm_allocation *mm;
+ struct nouveau_mm_allocation *mm;
};
#define NV50_QUERY_ALLOC_SPACE 128
nouveau_bo_ref(NULL, &q->bo);
if (q->mm) {
if (q->ready)
- nv50_mm_free(q->mm);
+ nouveau_mm_free(q->mm);
else
- nouveau_fence_work(screen->base.fence.current, nv50_mm_free, q->mm);
+ nouveau_fence_work(screen->base.fence.current, nouveau_mm_free, q->mm);
}
}
if (size) {
- q->mm = nv50_mm_allocate(screen->mm_GART, size, &q->bo, &q->base);
+ q->mm = nouveau_mm_allocate(screen->mm_GART, size, &q->bo, &q->base);
if (!q->bo)
return FALSE;
q->offset = q->base;
struct nouveau_fence *fence;
struct nouveau_fence *fence_wr;
- struct nv50_mm_allocation *mm;
+ struct nouveau_mm_allocation *mm;
};
void
if (screen->tic.entries)
FREE(screen->tic.entries);
- nv50_mm_destroy(screen->mm_GART);
- nv50_mm_destroy(screen->mm_VRAM);
- nv50_mm_destroy(screen->mm_VRAM_fe0);
+ nouveau_mm_destroy(screen->mm_GART);
+ nouveau_mm_destroy(screen->mm_VRAM);
+ nouveau_mm_destroy(screen->mm_VRAM_fe0);
nouveau_grobj_free(&screen->tesla);
nouveau_grobj_free(&screen->eng2d);
screen->tic.entries = CALLOC(4096, sizeof(void *));
screen->tsc.entries = screen->tic.entries + 2048;
- screen->mm_GART = nv50_mm_create(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
- 0x000);
- screen->mm_VRAM = nv50_mm_create(dev, NOUVEAU_BO_VRAM, 0x000);
- screen->mm_VRAM_fe0 = nv50_mm_create(dev, NOUVEAU_BO_VRAM, 0xfe0);
+ screen->mm_GART = nouveau_mm_create(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
+ 0x000);
+ screen->mm_VRAM = nouveau_mm_create(dev, NOUVEAU_BO_VRAM, 0x000);
+ screen->mm_VRAM_fe0 = nouveau_mm_create(dev, NOUVEAU_BO_VRAM, 0xfe0);
nouveau_fence_new(&screen->base, &screen->base.fence.current, FALSE);
#define NOUVEAU_NVC0
#include "nouveau/nouveau_screen.h"
#include "nouveau/nouveau_fence.h"
+#include "nouveau/nouveau_mm.h"
#undef NOUVEAU_NVC0
#include "nv50_winsys.h"
#include "nv50_stateobj.h"
#define NV50_TIC_MAX_ENTRIES 2048
#define NV50_TSC_MAX_ENTRIES 2048
-struct nv50_mman;
struct nv50_context;
#define NV50_SCRATCH_SIZE (2 << 20)
struct nouveau_notifier *sync;
- struct nv50_mman *mm_GART;
- struct nv50_mman *mm_VRAM;
- struct nv50_mman *mm_VRAM_fe0;
+ struct nouveau_mman *mm_GART;
+ struct nouveau_mman *mm_VRAM;
+ struct nouveau_mman *mm_VRAM_fe0;
struct nouveau_grobj *tesla;
struct nouveau_grobj *eng2d;
return (struct nv50_screen *)screen;
}
-/* Since a resource can be migrated, we need to decouple allocations from
- * them. This struct is linked with fences for delayed freeing of allocs.
- */
-struct nv50_mm_allocation {
- struct nv50_mm_allocation *next;
- void *priv;
- uint32_t offset;
-};
-
-extern struct nv50_mman *
-nv50_mm_create(struct nouveau_device *, uint32_t domain, uint32_t storage_type);
-
-extern void
-nv50_mm_destroy(struct nv50_mman *);
-
-extern struct nv50_mm_allocation *
-nv50_mm_allocate(struct nv50_mman *,
- uint32_t size, struct nouveau_bo **, uint32_t *offset);
-extern void
-nv50_mm_free(struct nv50_mm_allocation *);
-
void nv50_screen_make_buffers_resident(struct nv50_screen *);
int nv50_screen_tic_alloc(struct nv50_screen *, void *);