nvc0: fix user vertex buffer updates
src/gallium/drivers/nvc0/nvc0_resource.h
#ifndef __NVC0_RESOURCE_H__
#define __NVC0_RESOURCE_H__

#include "util/u_transfer.h"
#include "util/u_double_list.h"
#define NOUVEAU_NVC0
#include "nouveau/nouveau_winsys.h"
#undef NOUVEAU_NVC0

#include "nvc0_fence.h"

struct pipe_resource;
struct nouveau_bo;
struct nvc0_context;

#define NVC0_BUFFER_SCORE_MIN -25000
#define NVC0_BUFFER_SCORE_MAX 25000
#define NVC0_BUFFER_SCORE_VRAM_THRESHOLD 20000

/* DIRTY: buffer was (or will be after the next flush) written to by the GPU
 * and resource->data has not been updated to reflect the modified VRAM
 * contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * between GL calls
 */
#define NVC0_BUFFER_STATUS_DIRTY       (1 << 0)
#define NVC0_BUFFER_STATUS_USER_MEMORY (1 << 7)

/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses.
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nvc0_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint8_t *data;
   struct nouveau_bo *bo;
   uint32_t offset;

   uint8_t status;
   uint8_t domain;

   int16_t score; /* lowered when mapped (CPU access) very often; buffers with
                     a high score may be migrated to VRAM */

   struct nvc0_fence *fence;
   struct nvc0_fence *fence_wr;

   struct nvc0_mm_allocation *mm;
};

void
nvc0_buffer_release_gpu_storage(struct nvc0_resource *);

boolean
nvc0_buffer_download(struct nvc0_context *, struct nvc0_resource *,
                     unsigned start, unsigned size);

boolean
nvc0_buffer_migrate(struct nvc0_context *,
                    struct nvc0_resource *, unsigned domain);

static INLINE void
nvc0_buffer_adjust_score(struct nvc0_context *nvc0, struct nvc0_resource *res,
                         int16_t score)
{
   if (score < 0) {
      if (res->score > NVC0_BUFFER_SCORE_MIN)
         res->score += score;
   } else
   if (score > 0) {
      if (res->score < NVC0_BUFFER_SCORE_MAX)
         res->score += score;
      if (res->domain == NOUVEAU_BO_GART &&
          res->score > NVC0_BUFFER_SCORE_VRAM_THRESHOLD)
         nvc0_buffer_migrate(nvc0, res, NOUVEAU_BO_VRAM);
   }
}
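
/* Note on the scoring mechanics (derived from the constants above and the
 * -250 adjustment used in nvc0_resource_map_offset() below): frequent CPU
 * maps drive a buffer's score toward NVC0_BUFFER_SCORE_MIN, while positive
 * adjustments (presumably from GPU-side use; the callers are outside this
 * header) accumulate toward NVC0_BUFFER_SCORE_MAX. Once a GART-resident
 * buffer's score exceeds NVC0_BUFFER_SCORE_VRAM_THRESHOLD, the positive
 * branch above migrates it to VRAM via nvc0_buffer_migrate().
 */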

/* XXX: wait for fence (at the moment this is only used for vertex push) */
static INLINE void *
nvc0_resource_map_offset(struct nvc0_context *nvc0,
                         struct nvc0_resource *res, uint32_t offset,
                         uint32_t flags)
{
   void *map;

   nvc0_buffer_adjust_score(nvc0, res, -250);

   if ((res->domain == NOUVEAU_BO_VRAM) &&
       (res->status & NVC0_BUFFER_STATUS_DIRTY))
      nvc0_buffer_download(nvc0, res, 0, res->base.width0);

   if ((res->domain != NOUVEAU_BO_GART) ||
       (res->status & NVC0_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->mm)
      flags |= NOUVEAU_BO_NOSYNC;

   /* map the buffer from offset to its end */
   if (nouveau_bo_map_range(res->bo, res->offset + offset,
                            res->base.width0 - offset, flags))
      return NULL;

   map = res->bo->map;
   nouveau_bo_unmap(res->bo);
   return map;
}

static INLINE void
nvc0_resource_unmap(struct nvc0_resource *res)
{
   /* no-op */
}
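
/* Usage sketch (not part of the original header; names and offsets are
 * illustrative): the vertex push path mentioned above would map a region for
 * CPU reads and then "unmap" it, which is a no-op because
 * nvc0_resource_map_offset() either returns res->data directly or drops the
 * temporary bo mapping right away:
 *
 *    const uint8_t *vtx =
 *       nvc0_resource_map_offset(nvc0, res, vb_offset, NOUVEAU_BO_RD);
 *    if (vtx) {
 *       ... read vertex data ...
 *       nvc0_resource_unmap(res);
 *    }
 */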

#define NVC0_TILE_DIM_SHIFT(m, d) (((m) >> (d * 4)) & 0xf)

#define NVC0_TILE_PITCH(m)  (64 << NVC0_TILE_DIM_SHIFT(m, 0))
#define NVC0_TILE_HEIGHT(m) ( 8 << NVC0_TILE_DIM_SHIFT(m, 1))
#define NVC0_TILE_DEPTH(m)  ( 1 << NVC0_TILE_DIM_SHIFT(m, 2))

#define NVC0_TILE_SIZE_2D(m) (((64 * 8) << \
                               NVC0_TILE_DIM_SHIFT(m, 0)) << \
                              NVC0_TILE_DIM_SHIFT(m, 1))

#define NVC0_TILE_SIZE(m) (NVC0_TILE_SIZE_2D(m) << NVC0_TILE_DIM_SHIFT(m, 2))
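
/* Worked example of the tiling macros above: tile_mode 0x021 has dimension
 * shifts x = 1, y = 2, z = 0, so NVC0_TILE_PITCH(0x021) = 64 << 1 = 128,
 * NVC0_TILE_HEIGHT(0x021) = 8 << 2 = 32, NVC0_TILE_DEPTH(0x021) = 1, and
 * NVC0_TILE_SIZE(0x021) = (64 * 8) << (1 + 2) = 4096 bytes.
 */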

struct nvc0_miptree_level {
   uint32_t offset;
   uint32_t pitch;
   uint32_t tile_mode;
};

#define NVC0_MAX_TEXTURE_LEVELS 16

struct nvc0_miptree {
   struct nvc0_resource base;
   struct nvc0_miptree_level level[NVC0_MAX_TEXTURE_LEVELS];
   uint32_t total_size;
   uint32_t layer_stride;
   boolean layout_3d; /* TRUE if layer count varies with mip level */
};

static INLINE struct nvc0_miptree *
nvc0_miptree(struct pipe_resource *pt)
{
   return (struct nvc0_miptree *)pt;
}

static INLINE struct nvc0_resource *
nvc0_resource(struct pipe_resource *resource)
{
   return (struct nvc0_resource *)resource;
}

/* is the resource mapped into the GPU's address space (i.e. VRAM or GART)? */
static INLINE boolean
nvc0_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nvc0_resource(resource)->domain != 0;
}

void
nvc0_init_resource_functions(struct pipe_context *pcontext);

void
nvc0_screen_init_resource_functions(struct pipe_screen *pscreen);

/* Internal functions:
 */
struct pipe_resource *
nvc0_miptree_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmp);

struct pipe_resource *
nvc0_miptree_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle);

struct pipe_resource *
nvc0_buffer_create(struct pipe_screen *pscreen,
                   const struct pipe_resource *templ);

struct pipe_resource *
nvc0_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned usage);

struct pipe_surface *
nvc0_miptree_surface_new(struct pipe_context *,
                         struct pipe_resource *,
                         const struct pipe_surface *templ);

void
nvc0_miptree_surface_del(struct pipe_context *, struct pipe_surface *);

boolean
nvc0_user_buffer_upload(struct nvc0_resource *, unsigned base, unsigned size);

#endif