nouveau: remove automatic buffer migration heuristics
[mesa.git] / src/gallium/drivers/nouveau/nouveau_buffer.c
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"

#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "nouveau_buffer.h"
#include "nouveau_mm.h"

struct nouveau_transfer {
   struct pipe_transfer base;
};

static INLINE struct nouveau_transfer *
nouveau_transfer(struct pipe_transfer *transfer)
{
   return (struct nouveau_transfer *)transfer;
}

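/* Allocate backing storage for @buf in the given memory domain. VRAM
 * allocations fall back to GART if no VRAM is available; for any domain
 * other than GART a system memory copy of the data is kept as well.
 */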
static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = buf->base.width0;

   if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
      size = align(size, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
   }
   if (domain != NOUVEAU_BO_GART) {
      if (!buf->data) {
         buf->data = MALLOC(buf->base.width0);
         if (!buf->data)
            return FALSE;
      }
   }
   buf->domain = domain;
   return TRUE;
}

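/* Schedule the mm allocation to be freed once @fence has signalled, and
 * clear the caller's pointer to it.
 */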
static INLINE void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}

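/* Drop the buffer's GPU storage (BO reference and suballocation); the
 * suballocation is only freed once the buffer's last fence has passed.
 */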
INLINE void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
   nouveau_bo_ref(NULL, &buf->bo);

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   buf->domain = 0;
}

static INLINE boolean
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   return nouveau_buffer_allocate(screen, buf, domain);
}

static void
nouveau_buffer_destroy(struct pipe_screen *pscreen,
                       struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);

   nouveau_buffer_release_gpu_storage(res);

   if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      FREE(res->data);

   FREE(res);
}

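/* Copy a VRAM buffer's contents back into its system memory shadow,
 * going through a temporary GART bounce buffer.
 */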
/* Maybe just migrate to GART right away if we actually need to do this. */
boolean
nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
                        unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   assert(buf->domain == NOUVEAU_BO_VRAM);

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);

   if (nouveau_bo_map_range(bounce, offset, size, NOUVEAU_BO_RD)) {
      /* don't leak the bounce buffer on a failed map */
      nouveau_bo_ref(NULL, &bounce);
      if (mm)
         nouveau_mm_free(mm);
      return FALSE;
   }
   memcpy(buf->data + start, bounce->map, size);
   nouveau_bo_unmap(bounce);

   buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nouveau_mm_free(mm);
   return TRUE;
}

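/* Upload the buffer's system memory copy to its GPU storage. Small updates
 * are pushed through the command stream directly; larger ones go through a
 * GART bounce buffer and a GPU-side copy.
 */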
static boolean
nouveau_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf,
                      unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   if (size <= 192) {
      if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
         nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
                     start, size / 4, (const uint32_t *)(buf->data + start));
      else
         nv->push_data(nv, buf->bo, buf->offset + start, buf->domain,
                       size, buf->data + start);
      return TRUE;
   }

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   if (nouveau_bo_map_range(bounce, offset, size,
                            NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC)) {
      /* don't leak the bounce buffer on a failed map */
      nouveau_bo_ref(NULL, &bounce);
      if (mm)
         nouveau_mm_free(mm);
      return FALSE;
   }
   memcpy(bounce->map, buf->data + start, size);
   nouveau_bo_unmap(bounce);

   nv->copy_data(nv, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                 bounce, offset, NOUVEAU_BO_GART, size);

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      release_allocation(&mm, nv->screen->fence.current);

   if (start == 0 && size == buf->base.width0)
      buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
   return TRUE;
}

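/* Create a transfer object for the given range. For VRAM buffers that the
 * GPU may have written to, the data is downloaded into the system memory
 * copy first, so that reads see up-to-date contents.
 */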
static struct pipe_transfer *
nouveau_buffer_transfer_get(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box)
{
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *xfr = CALLOC_STRUCT(nouveau_transfer);
   if (!xfr)
      return NULL;

   xfr->base.resource = resource;
   xfr->base.box.x = box->x;
   xfr->base.box.width = box->width;
   xfr->base.usage = usage;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & PIPE_TRANSFER_READ) {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING)
            nouveau_buffer_download(nv, buf, 0, buf->base.width0);
      }
   }

   return &xfr->base;
}

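/* Destroy a transfer. For write transfers to VRAM buffers, this is the
 * point where the modified system memory copy is uploaded to the GPU.
 */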
static void
nouveau_buffer_transfer_destroy(struct pipe_context *pipe,
                                struct pipe_transfer *transfer)
{
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nouveau_context *nv = nouveau_context(pipe);

   if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
      if (buf->domain == NOUVEAU_BO_VRAM) {
         nouveau_buffer_upload(nv, buf, transfer->box.x, transfer->box.width);
      }

      if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
                                                 PIPE_BIND_INDEX_BUFFER)))
         nv->vbo_dirty = TRUE;
   }

   FREE(xfr);
}

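/* Wait until the GPU is done with the buffer. For reads it suffices to wait
 * for the last write fence; for writes we wait for all outstanding use.
 */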
static INLINE boolean
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence_wr))
         return FALSE;
   } else {
      if (!buf->fence)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence))
         return FALSE;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return TRUE;
}

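/* Non-blocking variant of the above: check whether the relevant fence has
 * signalled instead of waiting for it.
 */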
static INLINE boolean
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ)
      return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nouveau_fence_signalled(buf->fence));
}

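/* Return a CPU pointer to the transfer's range. Buffers not in GART are
 * served from the system memory copy; GART buffers are mapped directly.
 */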
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags;

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (buf->mm)
      flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
   else
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
   if (ret)
      return NULL;
   map = bo->map;

   /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
    * not doing so might make future maps fail or trigger "reloc while mapped"
    * errors. For now, mappings to userspace are guaranteed to be persistent.
    */
   nouveau_bo_unmap(bo);

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}

static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nv04_resource *res = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = res->bo;
   unsigned offset = res->offset + transfer->box.x + box->x;

   /* not using non-snoop system memory yet, no need for cflush */
   if (1)
      return;

   /* XXX: maybe need to upload for VRAM buffers here */

   nouveau_screen_bo_map_flush_range(pipe->screen, bo, offset, box->width);
}

static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   /* we've called nouveau_bo_unmap right after map */
}

const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,         /* get_handle */
   nouveau_buffer_destroy,                /* resource_destroy */
   nouveau_buffer_transfer_get,           /* get_transfer */
   nouveau_buffer_transfer_destroy,       /* transfer_destroy */
   nouveau_buffer_transfer_map,           /* transfer_map */
   nouveau_buffer_transfer_flush_region,  /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,         /* transfer_unmap */
   u_default_transfer_inline_write        /* transfer_inline_write */
};

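/* Create a buffer resource. Buffers whose bind flags are all covered by the
 * screen's sysmem_bindings stay in system memory (domain 0); everything else
 * is allocated in GART.
 */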
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if ((buffer->base.bind & screen->sysmem_bindings) == screen->sysmem_bindings)
      ret = nouveau_buffer_allocate(screen, buffer, 0);
   else
      ret = nouveau_buffer_allocate(screen, buffer, NOUVEAU_BO_GART);

   if (!ret)
      goto fail;

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}

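/* Wrap an application-provided pointer as a buffer resource; GPU storage is
 * only allocated later, if the buffer gets uploaded or migrated.
 */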
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nouveau_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   return &buffer->base;
}

/* Like download, but for GART buffers. Merge ? */
static INLINE boolean
nouveau_buffer_data_fetch(struct nv04_resource *buf, struct nouveau_bo *bo,
                          unsigned offset, unsigned size)
{
   if (!buf->data) {
      buf->data = MALLOC(size);
      if (!buf->data)
         return FALSE;
   }
   if (nouveau_bo_map_range(bo, offset, size, NOUVEAU_BO_RD))
      return FALSE;
   memcpy(buf->data, bo->map, size);
   nouveau_bo_unmap(bo);

   return TRUE;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map_range(buf->bo, buf->offset, size, NOUVEAU_BO_WR |
                                 NOUVEAU_BO_NOSYNC);
      if (ret)
         return FALSE; /* don't return the raw errno from a boolean function */
      memcpy(buf->bo->map, buf->data, size);
      nouveau_bo_unmap(buf->bo);
      FREE(buf->data);
      buf->data = NULL; /* avoid a double free in nouveau_buffer_destroy */
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
         return FALSE;
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}

/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
boolean
nouveau_user_buffer_upload(struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return FALSE;

   ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
                              NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   if (ret)
      return FALSE;
   memcpy(buf->bo->map, buf->data + base, size);
   nouveau_bo_unmap(buf->bo);

   return TRUE;
}