#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__
+#include "util/u_range.h"
#include "util/u_transfer.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
struct pipe_resource;
struct nouveau_context;
uint8_t status;
uint8_t domain;
+ uint16_t cb_bindings[6]; /* per-shader per-slot bindings */
+
struct nouveau_fence *fence;
struct nouveau_fence *fence_wr;
struct nouveau_mm_allocation *mm;
+
+ /* buffer range that has been initialized */
+ struct util_range valid_buffer_range;
};
void
struct nv04_resource *dst, unsigned dst_pos,
struct nv04_resource *src, unsigned src_pos, unsigned size);
-boolean
+bool
nouveau_buffer_migrate(struct nouveau_context *,
struct nv04_resource *, unsigned domain);
nouveau_resource_map_offset(struct nouveau_context *, struct nv04_resource *,
uint32_t offset, uint32_t flags);
/* Counterpart to nouveau_resource_map_offset() above.  Intentionally a no-op:
 * NOTE(review): presumably nouveau keeps resource mappings persistent (or
 * defers teardown elsewhere), so nothing needs to happen per-unmap — confirm
 * against the map implementation. */
-static INLINE void
+static inline void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}
/* Downcast a generic pipe_resource pointer to the driver-private
 * nv04_resource.  This is a plain pointer cast: it is only valid if every
 * pipe_resource handed to this driver really is embedded at the start of an
 * nv04_resource (the embedding is not visible in this chunk — confirm in the
 * struct definition). */
-static INLINE struct nv04_resource *
+static inline struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   return (struct nv04_resource *)resource;
}
/* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
-static INLINE boolean
+static inline bool
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
return nv04_resource(resource)->domain != 0;
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
unsigned bytes, unsigned usage);
-boolean
+bool
nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
unsigned base, unsigned size);
+void
+nouveau_buffer_invalidate(struct pipe_context *pipe,
+ struct pipe_resource *resource);
+
/* Copy data to a scratch buffer and return address & bo the data resides in.
* Returns 0 on failure.
*/