gallium: unify transfer functions
[mesa.git] src/gallium/drivers/nouveau/nouveau_buffer.c
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"

#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "nouveau_buffer.h"
#include "nouveau_mm.h"

struct nouveau_transfer {
   struct pipe_transfer base;
};

static INLINE struct nouveau_transfer *
nouveau_transfer(struct pipe_transfer *transfer)
{
   return (struct nouveau_transfer *)transfer;
}

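/* Allocate GPU memory for the buffer in the requested domain. A failed VRAM
 * allocation falls back to GART; for any non-GART domain a system memory
 * shadow copy (buf->data) is kept as well for CPU access and staging.
 */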
static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = buf->base.width0;

   if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
      size = align(size, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
   }
   if (domain != NOUVEAU_BO_GART) {
      if (!buf->data) {
         buf->data = MALLOC(buf->base.width0);
         if (!buf->data)
            return FALSE;
      }
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   return TRUE;
}

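/* Schedule a suballocation for release once the given fence has signalled,
 * i.e. once the GPU can no longer be accessing it.
 */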
static INLINE void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}

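/* Drop the buffer's reference to its BO and queue its suballocation for
 * release after the buffer's fence; the buffer reverts to domain 0.
 */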
INLINE void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
   nouveau_bo_ref(NULL, &buf->bo);

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   buf->domain = 0;
}

static INLINE boolean
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   return nouveau_buffer_allocate(screen, buf, domain);
}

static void
nouveau_buffer_destroy(struct pipe_screen *pscreen,
                       struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);

   nouveau_buffer_release_gpu_storage(res);

   if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      FREE(res->data);

   nouveau_fence_ref(NULL, &res->fence);
   nouveau_fence_ref(NULL, &res->fence_wr);

   FREE(res);
}

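/* Read a VRAM buffer's contents back into its system memory shadow,
 * staging the copy through a temporary GART bounce buffer.
 */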
/* Maybe just migrate to GART right away if we actually need to do this. */
boolean
nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
                        unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   assert(buf->domain == NOUVEAU_BO_VRAM);

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);

   if (nouveau_bo_map(bounce, NOUVEAU_BO_RD, nv->screen->client))
      return FALSE;
   memcpy(buf->data + start, (uint8_t *)bounce->map + offset, size);

   buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nouveau_mm_free(mm);
   return TRUE;
}

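/* Write a range of the system memory shadow back to the buffer's GPU
 * storage. Small updates go inline through the command stream (push_cb /
 * push_data); larger ones are staged through a GART bounce buffer.
 */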
static boolean
nouveau_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf,
                      unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   if (size <= 192 && (nv->push_data || nv->push_cb)) {
      if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
         nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
                     start, size / 4, (const uint32_t *)(buf->data + start));
      else
         nv->push_data(nv, buf->bo, buf->offset + start, buf->domain,
                       size, buf->data + start);
      return TRUE;
   }

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nouveau_bo_map(bounce, 0, nv->screen->client);
   memcpy((uint8_t *)bounce->map + offset, buf->data + start, size);

   nv->copy_data(nv, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                 bounce, offset, NOUVEAU_BO_GART, size);

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      release_allocation(&mm, nv->screen->fence.current);

   if (start == 0 && size == buf->base.width0)
      buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
   return TRUE;
}

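/* Wait until the GPU is done with the buffer: for READ access only the last
 * write must have completed, for WRITE access all GPU use must be finished.
 */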
static INLINE boolean
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence_wr))
         return FALSE;
   } else {
      if (!buf->fence)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence))
         return FALSE;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return TRUE;
}

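/* Non-blocking check whether the GPU might still be accessing the buffer. */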
static INLINE boolean
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ)
      return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nouveau_fence_signalled(buf->fence));
}

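/* Map a buffer range for CPU access. If the GPU has written to a VRAM
 * buffer, its contents are downloaded into the system memory shadow first;
 * non-GART buffers then return that shadow directly. GART buffers are mapped
 * for real, honouring PIPE_TRANSFER_DONTBLOCK and _UNSYNCHRONIZED by
 * checking or waiting on the buffer's fences.
 */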
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *xfr = CALLOC_STRUCT(nouveau_transfer);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = box->x;
   uint32_t flags = 0;

   if (!xfr)
      return NULL;

   xfr->base.resource = resource;
   xfr->base.box.x = box->x;
   xfr->base.box.width = box->width;
   xfr->base.usage = usage;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & PIPE_TRANSFER_READ) {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING)
            nouveau_buffer_download(nv, buf, 0, buf->base.width0);
      }
   }

   if (buf->domain != NOUVEAU_BO_GART) {
      *ptransfer = &xfr->base;
      return buf->data + offset;
   }

   if (!buf->mm)
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map(buf->bo, flags, nv->screen->client);
   if (ret) {
      FREE(xfr);
      return NULL;
   }
   map = (uint8_t *)bo->map + offset;

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE)) {
            FREE(xfr);
            return NULL;
         }
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   *ptransfer = &xfr->base;
   return map;
}

static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
#if 0
   struct nv04_resource *res = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = res->bo;
   unsigned offset = res->offset + transfer->box.x + box->x;

   /* not using non-snoop system memory yet, no need for cflush */
   if (1)
      return;

   /* XXX: maybe need to upload for VRAM buffers here */
#endif
}

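/* Unmap a buffer. Write mappings of VRAM buffers upload the modified range
 * from the shadow copy back to VRAM; vertex/index buffers are flagged so the
 * next draw revalidates their bindings.
 */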
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nouveau_context *nv = nouveau_context(pipe);

   if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
      if (buf->domain == NOUVEAU_BO_VRAM) {
         nouveau_buffer_upload(nv, buf, transfer->box.x, transfer->box.width);
      }

      if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
                                                 PIPE_BIND_INDEX_BUFFER)))
         nouveau_context(pipe)->vbo_dirty = TRUE;
   }

   FREE(xfr);
}

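/* Return a CPU-accessible pointer to the buffer contents at the given byte
 * offset, downloading, synchronizing or mapping GPU storage as required.
 */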
void *
nouveau_resource_map_offset(struct nouveau_context *nv,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   if ((res->domain == NOUVEAU_BO_VRAM) &&
       (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
      nouveau_buffer_download(nv, res, 0, res->base.width0);

   if ((res->domain != NOUVEAU_BO_GART) ||
       (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->mm) {
      unsigned rw;
      rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
      nouveau_buffer_sync(res, rw);
      if (nouveau_bo_map(res->bo, 0, NULL))
         return NULL;
   } else {
      if (nouveau_bo_map(res->bo, flags, nv->screen->client))
         return NULL;
   }
   return (uint8_t *)res->bo->map + res->offset + offset;
}

const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,        /* get_handle */
   nouveau_buffer_destroy,               /* resource_destroy */
   nouveau_buffer_transfer_map,          /* transfer_map */
   nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,        /* transfer_unmap */
   u_default_transfer_inline_write       /* transfer_inline_write */
};

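/* Create a buffer resource, picking VRAM or GART placement from the usage
 * hint when the bindings allow both, or from the screen's preferred
 * vidmem/sysmem bindings otherwise.
 */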
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.bind &
       (screen->vidmem_bindings & screen->sysmem_bindings)) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
      case PIPE_USAGE_STATIC:
         buffer->domain = NOUVEAU_BO_VRAM;
         break;
      case PIPE_USAGE_DYNAMIC:
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NOUVEAU_BO_VRAM;
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }
   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == FALSE)
      goto fail;

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}

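/* Wrap application-owned memory as a buffer resource; GPU storage is only
 * allocated later, when the buffer is migrated or uploaded.
 */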
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nouveau_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   return &buffer->base;
}

/* Like download, but for GART buffers. Merge ? */
static INLINE boolean
nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
                          struct nouveau_bo *bo, unsigned offset, unsigned size)
{
   if (!buf->data) {
      buf->data = MALLOC(size);
      if (!buf->data)
         return FALSE;
   }
   if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->screen->client))
      return FALSE;
   memcpy(buf->data, (uint8_t *)bo->map + offset, size);

   return TRUE;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map(buf->bo, 0, nv->screen->client);
      if (ret)
         return FALSE;
      memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
      FREE(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
         return FALSE;
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}

/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
boolean
nouveau_user_buffer_upload(struct nouveau_context *nv,
                           struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return FALSE;

   ret = nouveau_bo_map(buf->bo, 0, nv->screen->client);
   if (ret)
      return FALSE;
   memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);

   return TRUE;
}

/* Scratch data allocation. */

static INLINE int
nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo,
                         unsigned size)
{
   return nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                         4096, size, NULL, pbo);
}

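/* Release the extra "runout" BOs allocated when the regular scratch buffers
 * could not hold everything that was needed at once.
 */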
void
nouveau_scratch_runout_release(struct nouveau_context *nv)
{
   if (!nv->scratch.nr_runout)
      return;
   do {
      --nv->scratch.nr_runout;
      nouveau_bo_ref(NULL, &nv->scratch.runout[nv->scratch.nr_runout]);
   } while (nv->scratch.nr_runout);

   FREE(nv->scratch.runout);
   nv->scratch.end = 0;
   nv->scratch.runout = NULL;
}

/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
 */
static INLINE boolean
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
   int ret;
   const unsigned n = nv->scratch.nr_runout++;

   nv->scratch.runout = REALLOC(nv->scratch.runout,
                                (n + 0) * sizeof(*nv->scratch.runout),
                                (n + 1) * sizeof(*nv->scratch.runout));
   nv->scratch.runout[n] = NULL;

   ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout[n], size);
   if (!ret) {
      ret = nouveau_bo_map(nv->scratch.runout[n], 0, NULL);
      if (ret)
         nouveau_bo_ref(NULL, &nv->scratch.runout[--nv->scratch.nr_runout]);
   }
   if (!ret) {
      nv->scratch.current = nv->scratch.runout[n];
      nv->scratch.offset = 0;
      nv->scratch.end = size;
      nv->scratch.map = nv->scratch.current->map;
   }
   return !ret;
}

/* Continue to next scratch buffer, if available (no wrapping, large enough).
 * Allocate it if it has not yet been created.
 */
static INLINE boolean
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
   struct nouveau_bo *bo;
   int ret;
   const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;

   if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
      return FALSE;
   nv->scratch.id = i;

   bo = nv->scratch.bo[i];
   if (!bo) {
      ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
      if (ret)
         return FALSE;
      nv->scratch.bo[i] = bo;
   }
   nv->scratch.current = bo;
   nv->scratch.offset = 0;
   nv->scratch.end = nv->scratch.bo_size;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->screen->client);
   if (!ret)
      nv->scratch.map = bo->map;
   return !ret;
}

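/* Get more scratch space: advance to the next regular scratch buffer if one
 * is available, otherwise fall back to allocating a dedicated runout BO.
 */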
static boolean
nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
{
   boolean ret;

   ret = nouveau_scratch_next(nv, min_size);
   if (!ret)
      ret = nouveau_scratch_runout(nv, min_size);
   return ret;
}

/* Copy data to a scratch buffer and return address & bo the data resides in. */
uint64_t
nouveau_scratch_data(struct nouveau_context *nv,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **bo)
{
   unsigned bgn = MAX2(base, nv->scratch.offset);
   unsigned end = bgn + size;

   if (end >= nv->scratch.end) {
      end = base + size;
      if (!nouveau_scratch_more(nv, end))
         return 0;
      bgn = base;
   }
   nv->scratch.offset = align(end, 4);

   memcpy(nv->scratch.map + bgn, (const uint8_t *)data + base, size);

   *bo = nv->scratch.current;
   return (*bo)->offset + (bgn - base);
}

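/* Reserve size bytes of scratch space and return a CPU pointer to it;
 * the GPU address and the BO backing the reservation are returned through
 * gpu_addr and pbo.
 */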
void *
nouveau_scratch_get(struct nouveau_context *nv,
                    unsigned size, uint64_t *gpu_addr, struct nouveau_bo **pbo)
{
   unsigned bgn = nv->scratch.offset;
   unsigned end = nv->scratch.offset + size;

   if (end >= nv->scratch.end) {
      end = size;
      if (!nouveau_scratch_more(nv, end))
         return NULL;
      bgn = 0;
   }
   nv->scratch.offset = align(end, 4);

   *pbo = nv->scratch.current;
   *gpu_addr = nv->scratch.current->offset + bgn;
   return nv->scratch.map + bgn;
}