c0a781c6fd37504dbdcc617ceb97692ea67dad75
[mesa.git] / src / gallium / drivers / nouveau / nouveau_buffer.h
1 #ifndef __NOUVEAU_BUFFER_H__
2 #define __NOUVEAU_BUFFER_H__
3
4 #include "util/u_transfer.h"
5 #include "util/u_double_list.h"
6
7 struct pipe_resource;
8 struct nouveau_context;
9 struct nouveau_bo;
10
/* GPU_WRITING: buffer was (or will be after the next flush) written to by
 * the GPU and resource->data has not been updated to reflect the modified
 * VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * between GL calls
 */
17 #define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
18 #define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
19 #define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
20
21 /* Resources, if mapped into the GPU's address space, are guaranteed to
22 * have constant virtual addresses (nv50+).
23 *
24 * The address of a resource will lie within the nouveau_bo referenced,
25 * and this bo should be added to the memory manager's validation list.
26 */
27 struct nv04_resource {
28 struct pipe_resource base;
29 const struct u_resource_vtbl *vtbl;
30
31 uint8_t *data;
32 struct nouveau_bo *bo;
33 uint32_t offset;
34
35 uint8_t status;
36 uint8_t domain;
37
38 struct nouveau_fence *fence;
39 struct nouveau_fence *fence_wr;
40
41 struct nouveau_mm_allocation *mm;
42 };
43
44 void
45 nouveau_buffer_release_gpu_storage(struct nv04_resource *);
46
47 boolean
48 nouveau_buffer_download(struct nouveau_context *, struct nv04_resource *,
49 unsigned start, unsigned size);
50
51 boolean
52 nouveau_buffer_migrate(struct nouveau_context *,
53 struct nv04_resource *, unsigned domain);
54
55 /* XXX: wait for fence (atm only using this for vertex push) */
56 static INLINE void *
57 nouveau_resource_map_offset(struct nouveau_context *pipe,
58 struct nv04_resource *res, uint32_t offset,
59 uint32_t flags)
60 {
61 void *map;
62
63 if ((res->domain == NOUVEAU_BO_VRAM) &&
64 (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
65 nouveau_buffer_download(pipe, res, 0, res->base.width0);
66
67 if ((res->domain != NOUVEAU_BO_GART) ||
68 (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
69 return res->data + offset;
70
71 if (res->mm)
72 flags |= NOUVEAU_BO_NOSYNC;
73
74 if (nouveau_bo_map_range(res->bo, res->offset + offset,
75 res->base.width0, flags))
76 return NULL;
77
78 map = res->bo->map;
79 nouveau_bo_unmap(res->bo);
80 return map;
81 }
82
83 static INLINE void
84 nouveau_resource_unmap(struct nv04_resource *res)
85 {
86 /* no-op */
87 }
88
89 static INLINE struct nv04_resource *
90 nv04_resource(struct pipe_resource *resource)
91 {
92 return (struct nv04_resource *)resource;
93 }
94
95 /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
96 static INLINE boolean
97 nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
98 {
99 return nv04_resource(resource)->domain != 0;
100 }
101
102 struct pipe_resource *
103 nouveau_buffer_create(struct pipe_screen *pscreen,
104 const struct pipe_resource *templ);
105
106 struct pipe_resource *
107 nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
108 unsigned bytes, unsigned usage);
109
110 boolean
111 nouveau_user_buffer_upload(struct nv04_resource *, unsigned base,
112 unsigned size);
113
114 #endif