-#ifndef __NOUVEAU_RESOURCE_H__
-#define __NOUVEAU_RESOURCE_H__
+#ifndef __NOUVEAU_BUFFER_H__
+#define __NOUVEAU_BUFFER_H__
+#include "util/u_range.h"
#include "util/u_transfer.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
struct pipe_resource;
+struct nouveau_context;
struct nouveau_bo;
-#define NOUVEAU_BUFFER_SCORE_MIN -25000
-#define NOUVEAU_BUFFER_SCORE_MAX 25000
-#define NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD 20000
-
/* DIRTY: buffer was (or will be after the next flush) written to by GPU and
* resource->data has not been updated to reflect modified VRAM contents
*
* USER_MEMORY: resource->data is a pointer to client memory and may change
* between GL calls
+ *
+ * USER_PTR: bo is backed by user memory mapped into the GPU's VM
*/
-#define NOUVEAU_BUFFER_STATUS_DIRTY (1 << 0)
+#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
+#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
+#define NOUVEAU_BUFFER_STATUS_DIRTY (1 << 2)
+#define NOUVEAU_BUFFER_STATUS_USER_PTR (1 << 6)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
+#define NOUVEAU_BUFFER_STATUS_REALLOC_MASK NOUVEAU_BUFFER_STATUS_USER_MEMORY
+
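/* Not part of this patch -- a minimal sketch of how the new status bits are
 * meant to be combined; the local variable is hypothetical.
 */
uint8_t status = 0;

status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING; /* a GPU write has been queued */
status |= NOUVEAU_BUFFER_STATUS_DIRTY;       /* VRAM copy now newer than resource->data */

/* the REALLOC_MASK name suggests these are the only bits that survive a
 * reallocation of the backing storage: */
status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;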
/* Resources, if mapped into the GPU's address space, are guaranteed to
* have constant virtual addresses (nv50+).
*
struct pipe_resource base;
const struct u_resource_vtbl *vtbl;
- uint8_t *data;
+ uint64_t address; /* virtual address (nv50+) */
+
+ uint8_t *data; /* resource's contents, if domain == 0, or cached */
struct nouveau_bo *bo;
- uint32_t offset;
+ uint32_t offset; /* offset into the data/bo */
uint8_t status;
uint8_t domain;
- int16_t score; /* low if mapped very often, if high can move to VRAM */
+ uint16_t cb_bindings[6]; /* per-shader per-slot bindings */
struct nouveau_fence *fence;
struct nouveau_fence *fence_wr;
struct nouveau_mm_allocation *mm;
+
+ /* buffer range that has been initialized */
+ struct util_range valid_buffer_range;
};
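/* A sketch (not part of the patch) of how the new fields fit together.
 * The helper names are hypothetical; util_range_add() is assumed to be the
 * two-argument u_range.h helper of this era.
 */
static inline uint64_t
buffer_gpu_address(const struct nv04_resource *buf, unsigned offset)
{
   /* nv50+: the virtual address is constant, so it can be baked into
    * command streams; offset is relative to the start of the resource */
   return buf->address + offset;
}

static inline void
buffer_mark_valid(struct nv04_resource *buf, unsigned start, unsigned size)
{
   /* record which part of the buffer has been initialized, so reads of
    * never-written ranges can skip synchronization */
   util_range_add(&buf->valid_buffer_range, start, start + size);
}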
void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);
-boolean
-nouveau_buffer_download(struct pipe_context *, struct nv04_resource *,
- unsigned start, unsigned size);
+void
+nouveau_copy_buffer(struct nouveau_context *,
+ struct nv04_resource *dst, unsigned dst_pos,
+ struct nv04_resource *src, unsigned src_pos, unsigned size);
-boolean
-nouveau_buffer_migrate(struct pipe_context *,
+bool
+nouveau_buffer_migrate(struct nouveau_context *,
struct nv04_resource *, unsigned domain);
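/* e.g. (sketch; "nv" and "res" are assumptions): a buffer the GPU keeps
 * reading could be moved from GART to VRAM, which is what the removed
 * score heuristic below used to trigger automatically.
 */
if (res->domain == NOUVEAU_BO_GART)
   nouveau_buffer_migrate(nv, res, NOUVEAU_BO_VRAM);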
-static INLINE void
-nouveau_buffer_adjust_score(struct pipe_context *pipe,
- struct nv04_resource *res, int16_t score)
-{
- if (score < 0) {
- if (res->score > NOUVEAU_BUFFER_SCORE_MIN)
- res->score += score;
- } else
- if (score > 0){
- if (res->score < NOUVEAU_BUFFER_SCORE_MAX)
- res->score += score;
- if (res->domain == NOUVEAU_BO_GART &&
- res->score > NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD)
- nouveau_buffer_migrate(pipe, res, NOUVEAU_BO_VRAM);
- }
-}
-
-/* XXX: wait for fence (atm only using this for vertex push) */
-static INLINE void *
-nouveau_resource_map_offset(struct pipe_context *pipe,
- struct nv04_resource *res, uint32_t offset,
- uint32_t flags)
-{
- void *map;
-
- nouveau_buffer_adjust_score(pipe, res, -250);
-
- if ((res->domain == NOUVEAU_BO_VRAM) &&
- (res->status & NOUVEAU_BUFFER_STATUS_DIRTY))
- nouveau_buffer_download(pipe, res, 0, res->base.width0);
+void *
+nouveau_resource_map_offset(struct nouveau_context *, struct nv04_resource *,
+ uint32_t offset, uint32_t flags);
- if ((res->domain != NOUVEAU_BO_GART) ||
- (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
- return res->data + offset;
-
- if (res->mm)
- flags |= NOUVEAU_BO_NOSYNC;
-
- if (nouveau_bo_map_range(res->bo, res->offset + offset,
- res->base.width0, flags))
- return NULL;
-
- map = res->bo->map;
- nouveau_bo_unmap(res->bo);
- return map;
-}
-
-static INLINE void
+static inline void
nouveau_resource_unmap(struct nv04_resource *res)
{
/* no-op */
}
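/* Typical use (sketch): map a range of a buffer for CPU access during
 * vertex push.  "nv", "prsc", "offset" and "size" are assumptions;
 * NOUVEAU_BO_RD is the libdrm read-access flag.
 */
struct nv04_resource *buf = nv04_resource(prsc);
const uint8_t *map = nouveau_resource_map_offset(nv, buf, offset, NOUVEAU_BO_RD);

if (map) {
   /* ... read size bytes starting at map ... */
   nouveau_resource_unmap(buf); /* currently a no-op, kept for symmetry */
}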
-static INLINE struct nv04_resource *
+static inline struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
return (struct nv04_resource *)resource;
}
/* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
-static INLINE boolean
+static inline bool
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
return nv04_resource(resource)->domain != 0;
nouveau_buffer_create(struct pipe_screen *pscreen,
const struct pipe_resource *templ);
+struct pipe_resource *
+nouveau_buffer_create_from_user(struct pipe_screen *pscreen,
+ const struct pipe_resource *templ,
+ void *user_ptr);
+
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
unsigned bytes, unsigned usage);
-boolean
-nouveau_user_buffer_upload(struct nv04_resource *, unsigned base,
- unsigned size);
+bool
+nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
+ unsigned base, unsigned size);
+
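/* Wrapping client memory and later uploading it into a real bo might look
 * like this (sketch; passing PIPE_BIND_VERTEX_BUFFER for the usage argument
 * is an assumption, the other names are hypothetical):
 */
struct pipe_resource *prsc =
   nouveau_user_buffer_create(pscreen, user_ptr, bytes, PIPE_BIND_VERTEX_BUFFER);
struct nv04_resource *buf = nv04_resource(prsc);

/* when the GPU actually needs the data: */
if (!nouveau_user_buffer_upload(nv, buf, 0, bytes)) {
   /* upload failed; keep reading from buf->data (USER_MEMORY path) */
}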
+void
+nouveau_buffer_invalidate(struct pipe_context *pipe,
+ struct pipe_resource *resource);
+
+/* Copy data to a scratch buffer and return address & bo the data resides in.
+ * Returns 0 on failure.
+ */
+uint64_t
+nouveau_scratch_data(struct nouveau_context *,
+ const void *data, unsigned base, unsigned size,
+ struct nouveau_bo **);
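/* e.g. pushing a small piece of data for a single draw (sketch; the names
 * are assumptions):
 */
struct nouveau_bo *bo;
uint64_t addr = nouveau_scratch_data(nv, data, 0, size, &bo);

if (addr) {
   /* emit addr / reference bo in the command stream for this draw */
}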
#endif