gallium: add BIND flags for R/W buffers and images
[mesa.git] / src / gallium / drivers / nouveau / nouveau_buffer.c
1
2 #include "util/u_inlines.h"
3 #include "util/u_memory.h"
4 #include "util/u_math.h"
5 #include "util/u_surface.h"
6
7 #include "nouveau_screen.h"
8 #include "nouveau_context.h"
9 #include "nouveau_winsys.h"
10 #include "nouveau_fence.h"
11 #include "nouveau_buffer.h"
12 #include "nouveau_mm.h"
13
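/* Write transfers of at most this many bytes may be pushed inline through the
 * command stream (nv->push_data / nv->push_cb) instead of being staged in a
 * GART bounce buffer; see nouveau_transfer_staging() below.
 */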
14 #define NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD 192
15
16 struct nouveau_transfer {
17 struct pipe_transfer base;
18
19 uint8_t *map;
20 struct nouveau_bo *bo;
21 struct nouveau_mm_allocation *mm;
22 uint32_t offset;
23 };
24
25 static INLINE struct nouveau_transfer *
26 nouveau_transfer(struct pipe_transfer *transfer)
27 {
28 return (struct nouveau_transfer *)transfer;
29 }
30
31 static INLINE boolean
32 nouveau_buffer_malloc(struct nv04_resource *buf)
33 {
34 if (!buf->data)
35 buf->data = align_malloc(buf->base.width0, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
36 return !!buf->data;
37 }
38
39 static INLINE boolean
40 nouveau_buffer_allocate(struct nouveau_screen *screen,
41 struct nv04_resource *buf, unsigned domain)
42 {
43 uint32_t size = buf->base.width0;
44
45 if (buf->base.bind & (PIPE_BIND_CONSTANT_BUFFER |
46 PIPE_BIND_COMPUTE_RESOURCE |
47 PIPE_BIND_SHADER_BUFFER |
48 PIPE_BIND_SHADER_IMAGE))
49 size = align(size, 0x100);
50
51 if (domain == NOUVEAU_BO_VRAM) {
52 buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
53 &buf->bo, &buf->offset);
54 if (!buf->bo)
55 return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
56 NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
57 } else
58 if (domain == NOUVEAU_BO_GART) {
59 buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
60 &buf->bo, &buf->offset);
61 if (!buf->bo)
62 return FALSE;
63 NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
64 } else {
65 assert(domain == 0);
66 if (!nouveau_buffer_malloc(buf))
67 return FALSE;
68 }
69 buf->domain = domain;
70 if (buf->bo)
71 buf->address = buf->bo->offset + buf->offset;
72
73 util_range_set_empty(&buf->valid_buffer_range);
74
75 return TRUE;
76 }
77
78 static INLINE void
79 release_allocation(struct nouveau_mm_allocation **mm,
80 struct nouveau_fence *fence)
81 {
82 nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
83 (*mm) = NULL;
84 }
85
86 INLINE void
87 nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
88 {
89 nouveau_bo_ref(NULL, &buf->bo);
90
91 if (buf->mm)
92 release_allocation(&buf->mm, buf->fence);
93
94 if (buf->domain == NOUVEAU_BO_VRAM)
95 NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0);
96 if (buf->domain == NOUVEAU_BO_GART)
97 NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_sys, -(uint64_t)buf->base.width0);
98
99 buf->domain = 0;
100 }
101
102 static INLINE boolean
103 nouveau_buffer_reallocate(struct nouveau_screen *screen,
104 struct nv04_resource *buf, unsigned domain)
105 {
106 nouveau_buffer_release_gpu_storage(buf);
107
108 nouveau_fence_ref(NULL, &buf->fence);
109 nouveau_fence_ref(NULL, &buf->fence_wr);
110
111 buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
112
113 return nouveau_buffer_allocate(screen, buf, domain);
114 }
115
116 static void
117 nouveau_buffer_destroy(struct pipe_screen *pscreen,
118 struct pipe_resource *presource)
119 {
120 struct nv04_resource *res = nv04_resource(presource);
121
122 nouveau_buffer_release_gpu_storage(res);
123
124 if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
125 align_free(res->data);
126
127 nouveau_fence_ref(NULL, &res->fence);
128 nouveau_fence_ref(NULL, &res->fence_wr);
129
130 util_range_destroy(&res->valid_buffer_range);
131
132 FREE(res);
133
134 NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1);
135 }
136
137 /* Set up a staging area for the transfer. This is either done in "regular"
138 * system memory if the driver supports push_data (nv50+) and the data is
139 * small enough (and permit_pb == true), or in GART memory.
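 *
 * The returned pointer (and, for the bo path, tx->offset) is advanced by
 * box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK, so the staging copy keeps the
 * same sub-alignment as the source range.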
140 */
141 static uint8_t *
142 nouveau_transfer_staging(struct nouveau_context *nv,
143 struct nouveau_transfer *tx, boolean permit_pb)
144 {
145 const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
146 const unsigned size = align(tx->base.box.width, 4) + adj;
147
148 if (!nv->push_data)
149 permit_pb = FALSE;
150
151 if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) {
152 tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
153 if (tx->map)
154 tx->map += adj;
155 } else {
156 tx->mm =
157 nouveau_mm_allocate(nv->screen->mm_GART, size, &tx->bo, &tx->offset);
158 if (tx->bo) {
159 tx->offset += adj;
160 if (!nouveau_bo_map(tx->bo, 0, NULL))
161 tx->map = (uint8_t *)tx->bo->map + tx->offset;
162 }
163 }
164 return tx->map;
165 }
166
167 /* Copies data from the resource into the transfer's temporary GART
168 * buffer. Also updates buf->data if present.
169 *
170 * Maybe just migrate to GART right away if we actually need to do this. */
171 static boolean
172 nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
173 {
174 struct nv04_resource *buf = nv04_resource(tx->base.resource);
175 const unsigned base = tx->base.box.x;
176 const unsigned size = tx->base.box.width;
177
178 NOUVEAU_DRV_STAT(nv->screen, buf_read_bytes_staging_vid, size);
179
180 nv->copy_data(nv, tx->bo, tx->offset, NOUVEAU_BO_GART,
181 buf->bo, buf->offset + base, buf->domain, size);
182
183 if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client))
184 return FALSE;
185
186 if (buf->data)
187 memcpy(buf->data + base, tx->map, size);
188
189 return TRUE;
190 }
191
192 static void
193 nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
194 unsigned offset, unsigned size)
195 {
196 struct nv04_resource *buf = nv04_resource(tx->base.resource);
197 uint8_t *data = tx->map + offset;
198 const unsigned base = tx->base.box.x + offset;
199 const boolean can_cb = !((base | size) & 3);
200
201 if (buf->data)
202 memcpy(data, buf->data + base, size);
203 else
204 buf->status |= NOUVEAU_BUFFER_STATUS_DIRTY;
205
206 if (buf->domain == NOUVEAU_BO_VRAM)
207 NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_vid, size);
208 if (buf->domain == NOUVEAU_BO_GART)
209 NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_sys, size);
210
211 if (tx->bo)
212 nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain,
213 tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size);
214 else
215 if ((buf->base.bind & PIPE_BIND_CONSTANT_BUFFER) && nv->push_cb && can_cb)
216 nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
217 base, size / 4, (const uint32_t *)data);
218 else
219 nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data);
220
221 nouveau_fence_ref(nv->screen->fence.current, &buf->fence);
222 nouveau_fence_ref(nv->screen->fence.current, &buf->fence_wr);
223 }
224
225 /* Waits on the buffer's relevant fences until its backing data can be safely
226  * accessed by the CPU for reading or writing.
227  */
228 static INLINE boolean
229 nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
230 {
231 if (rw == PIPE_TRANSFER_READ) {
232 if (!buf->fence_wr)
233 return TRUE;
234 NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
235 !nouveau_fence_signalled(buf->fence_wr));
236 if (!nouveau_fence_wait(buf->fence_wr))
237 return FALSE;
238 } else {
239 if (!buf->fence)
240 return TRUE;
241 NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
242 !nouveau_fence_signalled(buf->fence));
243 if (!nouveau_fence_wait(buf->fence))
244 return FALSE;
245
246 nouveau_fence_ref(NULL, &buf->fence);
247 }
248 nouveau_fence_ref(NULL, &buf->fence_wr);
249
250 return TRUE;
251 }
252
253 static INLINE boolean
254 nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
255 {
256 if (rw == PIPE_TRANSFER_READ)
257 return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
258 else
259 return (buf->fence && !nouveau_fence_signalled(buf->fence));
260 }
261
262 static INLINE void
263 nouveau_buffer_transfer_init(struct nouveau_transfer *tx,
264 struct pipe_resource *resource,
265 const struct pipe_box *box,
266 unsigned usage)
267 {
268 tx->base.resource = resource;
269 tx->base.level = 0;
270 tx->base.usage = usage;
271 tx->base.box.x = box->x;
272 tx->base.box.y = 0;
273 tx->base.box.z = 0;
274 tx->base.box.width = box->width;
275 tx->base.box.height = 1;
276 tx->base.box.depth = 1;
277 tx->base.stride = 0;
278 tx->base.layer_stride = 0;
279
280 tx->bo = NULL;
281 tx->map = NULL;
282 }
283
284 static INLINE void
285 nouveau_buffer_transfer_del(struct nouveau_context *nv,
286 struct nouveau_transfer *tx)
287 {
288 if (tx->map) {
289 if (likely(tx->bo)) {
290 nouveau_bo_ref(NULL, &tx->bo);
291 if (tx->mm)
292 release_allocation(&tx->mm, nv->screen->fence.current);
293 } else {
294 align_free(tx->map -
295 (tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK));
296 }
297 }
298 }
299
300 /* Creates a cache in system memory of the buffer data. */
301 static boolean
302 nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
303 {
304 struct nouveau_transfer tx;
305 boolean ret;
306 tx.base.resource = &buf->base;
307 tx.base.box.x = 0;
308 tx.base.box.width = buf->base.width0;
309 tx.bo = NULL;
310 tx.map = NULL;
311
312 if (!buf->data)
313 if (!nouveau_buffer_malloc(buf))
314 return FALSE;
315 if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY))
316 return TRUE;
317 nv->stats.buf_cache_count++;
318
319 if (!nouveau_transfer_staging(nv, &tx, FALSE))
320 return FALSE;
321
322 ret = nouveau_transfer_read(nv, &tx);
323 if (ret) {
324 buf->status &= ~NOUVEAU_BUFFER_STATUS_DIRTY;
325 memcpy(buf->data, tx.map, buf->base.width0);
326 }
327 nouveau_buffer_transfer_del(nv, &tx);
328 return ret;
329 }
330
331
332 #define NOUVEAU_TRANSFER_DISCARD \
333 (PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
334
335 /* Checks whether it is possible to completely discard the memory backing this
336 * resource. This can be useful if we would otherwise have to wait for a read
337 * operation to complete on this data.
338 */
339 static INLINE boolean
340 nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
341 {
342 if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
343 return FALSE;
344 if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
345 return FALSE;
346 if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
347 return FALSE;
348 return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
349 }
350
351 /* Returns a pointer to a memory area representing a window into the
352 * resource's data.
353 *
354  * This may or may not be the _actual_ memory area of the resource. However,
355 * when calling nouveau_buffer_transfer_unmap, if it wasn't the actual memory
356 * area, the contents of the returned map are copied over to the resource.
357 *
358 * The usage indicates what the caller plans to do with the map:
359 *
360 * WRITE means that the user plans to write to it
361 *
362 * READ means that the user plans on reading from it
363 *
364  * DISCARD_WHOLE_RESOURCE means that the whole resource is potentially going
365  * to be overwritten, and even the parts that are not do not need to be
366  * preserved.
367 *
368 * DISCARD_RANGE means that all the data in the specified range is going to
369 * be overwritten.
370 *
371  * The strategy for determining what kind of memory area to return is
372  * complex; see the comments inside the function.
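 *
 * As a rough usage sketch (not code from this driver; "res", "offset",
 * "bytes" and "data" are assumed names), a state tracker streaming vertex
 * data would typically hit the DISCARD_RANGE path like this:
 *
 *    struct pipe_transfer *xfer;
 *    struct pipe_box box;
 *    u_box_1d(offset, bytes, &box);
 *    void *p = pipe->transfer_map(pipe, res, 0,
 *                                 PIPE_TRANSFER_WRITE |
 *                                 PIPE_TRANSFER_DISCARD_RANGE,
 *                                 &box, &xfer);
 *    if (p) {
 *       memcpy(p, data, bytes);
 *       pipe->transfer_unmap(pipe, xfer);
 *    }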
373 */
374 static void *
375 nouveau_buffer_transfer_map(struct pipe_context *pipe,
376 struct pipe_resource *resource,
377 unsigned level, unsigned usage,
378 const struct pipe_box *box,
379 struct pipe_transfer **ptransfer)
380 {
381 struct nouveau_context *nv = nouveau_context(pipe);
382 struct nv04_resource *buf = nv04_resource(resource);
383 struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
384 uint8_t *map;
385 int ret;
386
387 if (!tx)
388 return NULL;
389 nouveau_buffer_transfer_init(tx, resource, box, usage);
390 *ptransfer = &tx->base;
391
392 if (usage & PIPE_TRANSFER_READ)
393 NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
394 if (usage & PIPE_TRANSFER_WRITE)
395 NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);
396
397 /* If we are trying to write to an uninitialized range, the user shouldn't
398  * care what was there before. So we can treat the write as if the target
399  * range were being discarded. Furthermore, even if this buffer is busy
400  * due to GPU activity, the GPU cannot depend on the contents of an
401  * uninitialized range, and so we can also treat the write as being
402  * unsynchronized.
403  */
404 if ((usage & PIPE_TRANSFER_WRITE) &&
405 !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
406 usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;
407
408 if (usage & PIPE_TRANSFER_PERSISTENT)
409 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
410
411 if (buf->domain == NOUVEAU_BO_VRAM) {
412 if (usage & NOUVEAU_TRANSFER_DISCARD) {
413 /* Set up a staging area for the user to write to. It will be copied
414 * back into VRAM on unmap. */
415 if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
416 buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
417 nouveau_transfer_staging(nv, tx, TRUE);
418 } else {
419 if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
420 /* The GPU is currently writing to this buffer. Copy its current
421 * contents to a staging area in the GART. This is necessary since
422  * the area being mapped is not being discarded.
423 */
424 if (buf->data) {
425 align_free(buf->data);
426 buf->data = NULL;
427 }
428 nouveau_transfer_staging(nv, tx, FALSE);
429 nouveau_transfer_read(nv, tx);
430 } else {
431 /* The buffer is currently idle. Create a staging area for writes,
432 * and make sure that the cached data is up-to-date. */
433 if (usage & PIPE_TRANSFER_WRITE)
434 nouveau_transfer_staging(nv, tx, TRUE);
435 if (!buf->data)
436 nouveau_buffer_cache(nv, buf);
437 }
438 }
439 return buf->data ? (buf->data + box->x) : tx->map;
440 } else
441 if (unlikely(buf->domain == 0)) {
442 return buf->data + box->x;
443 }
444
445 /* At this point, buf->domain == GART */
446
447 if (nouveau_buffer_should_discard(buf, usage)) {
448 int ref = buf->base.reference.count - 1;
449 nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
450 if (ref > 0) /* any references inside context possible ? */
451 nv->invalidate_resource_storage(nv, &buf->base, ref);
452 }
453
454 /* Note that nouveau_bo_map ends up doing a nouveau_bo_wait with the
455 * relevant flags. If buf->mm is set, that means this resource is part of a
456 * larger slab bo that holds multiple resources. So in that case, don't
457 * wait on the whole slab and instead use the logic below to return a
458 * reasonable buffer for that case.
459 */
460 ret = nouveau_bo_map(buf->bo,
461 buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
462 nv->client);
463 if (ret) {
464 FREE(tx);
465 return NULL;
466 }
467 map = (uint8_t *)buf->bo->map + buf->offset + box->x;
468
469 /* using kernel fences only if !buf->mm */
470 if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
471 return map;
472
473 /* If the GPU is currently reading/writing this buffer, we shouldn't
474 * interfere with its progress. So instead we either wait for the GPU to
475 * complete its operation, or set up a staging area to perform our work in.
476 */
477 if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
478 if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
479 /* Discarding was not possible, must sync because
480 * subsequent transfers might use UNSYNCHRONIZED. */
481 nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
482 } else
483 if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
484 /* The whole range is being discarded, so it doesn't matter what was
485 * there before. No need to copy anything over. */
486 nouveau_transfer_staging(nv, tx, TRUE);
487 map = tx->map;
488 } else
489 if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
490 if (usage & PIPE_TRANSFER_DONTBLOCK)
491 map = NULL;
492 else
493 nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
494 } else {
495 /* The returned map is expected to represent the buffer's current data, so
496  * copy it over into the staging area. */
497 nouveau_transfer_staging(nv, tx, TRUE);
498 if (tx->map)
499 memcpy(tx->map, map, box->width);
500 map = tx->map;
501 }
502 }
503 if (!map)
504 FREE(tx);
505 return map;
506 }
507
508
509
510 static void
511 nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
512 struct pipe_transfer *transfer,
513 const struct pipe_box *box)
514 {
515 struct nouveau_transfer *tx = nouveau_transfer(transfer);
516 struct nv04_resource *buf = nv04_resource(transfer->resource);
517
518 if (tx->map)
519 nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);
520
521 util_range_add(&buf->valid_buffer_range,
522 tx->base.box.x + box->x,
523 tx->base.box.x + box->x + box->width);
524 }
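
/* Rough sketch of the PIPE_TRANSFER_FLUSH_EXPLICIT protocol this hook serves
 * (not code from this driver; "xfer" and "box" are assumed names): map with
 * PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT, write parts of the map,
 * then announce each written sub-range before unmapping:
 *
 *    u_box_1d(dirty_start, dirty_len, &box);
 *    pipe->transfer_flush_region(pipe, xfer, &box);
 *    ...
 *    pipe->transfer_unmap(pipe, xfer);
 *
 * The box is relative to the mapped range, which is why the code above adds
 * tx->base.box.x to box->x.
 */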
525
526 /* Unmap stage of the transfer. If it was a WRITE transfer and the map that
527 * was returned was not the real resource's data, this needs to transfer the
528 * data back to the resource.
529 *
530  * Also sets vbo_dirty based on the buffer's bind flags.
531 */
532 static void
533 nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
534 struct pipe_transfer *transfer)
535 {
536 struct nouveau_context *nv = nouveau_context(pipe);
537 struct nouveau_transfer *tx = nouveau_transfer(transfer);
538 struct nv04_resource *buf = nv04_resource(transfer->resource);
539
540 if (tx->base.usage & PIPE_TRANSFER_WRITE) {
541 if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
542 nouveau_transfer_write(nv, tx, 0, tx->base.box.width);
543
544 if (likely(buf->domain)) {
545 const uint8_t bind = buf->base.bind;
546 /* make sure we invalidate dedicated caches */
547 if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
548 nv->vbo_dirty = TRUE;
549 }
550
551 util_range_add(&buf->valid_buffer_range,
552 tx->base.box.x, tx->base.box.x + tx->base.box.width);
553 }
554
555 if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
556 NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);
557
558 nouveau_buffer_transfer_del(nv, tx);
559 FREE(tx);
560 }
561
562
563 void
564 nouveau_copy_buffer(struct nouveau_context *nv,
565 struct nv04_resource *dst, unsigned dstx,
566 struct nv04_resource *src, unsigned srcx, unsigned size)
567 {
568 assert(dst->base.target == PIPE_BUFFER && src->base.target == PIPE_BUFFER);
569
570 if (likely(dst->domain) && likely(src->domain)) {
571 nv->copy_data(nv,
572 dst->bo, dst->offset + dstx, dst->domain,
573 src->bo, src->offset + srcx, src->domain, size);
574
575 dst->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
576 nouveau_fence_ref(nv->screen->fence.current, &dst->fence);
577 nouveau_fence_ref(nv->screen->fence.current, &dst->fence_wr);
578
579 src->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
580 nouveau_fence_ref(nv->screen->fence.current, &src->fence);
581 } else {
582 struct pipe_box src_box;
583 src_box.x = srcx;
584 src_box.y = 0;
585 src_box.z = 0;
586 src_box.width = size;
587 src_box.height = 1;
588 src_box.depth = 1;
589 util_resource_copy_region(&nv->pipe,
590 &dst->base, 0, dstx, 0, 0,
591 &src->base, 0, &src_box);
592 }
593
594 util_range_add(&dst->valid_buffer_range, dstx, dstx + size);
595 }
596
597
598 void *
599 nouveau_resource_map_offset(struct nouveau_context *nv,
600 struct nv04_resource *res, uint32_t offset,
601 uint32_t flags)
602 {
603 if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
604 return res->data + offset;
605
606 if (res->domain == NOUVEAU_BO_VRAM) {
607 if (!res->data || (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
608 nouveau_buffer_cache(nv, res);
609 }
610 if (res->domain != NOUVEAU_BO_GART)
611 return res->data + offset;
612
613 if (res->mm) {
614 unsigned rw;
615 rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
616 nouveau_buffer_sync(res, rw);
617 if (nouveau_bo_map(res->bo, 0, NULL))
618 return NULL;
619 } else {
620 if (nouveau_bo_map(res->bo, flags, nv->client))
621 return NULL;
622 }
623 return (uint8_t *)res->bo->map + res->offset + offset;
624 }
625
626
627 const struct u_resource_vtbl nouveau_buffer_vtbl =
628 {
629 u_default_resource_get_handle, /* get_handle */
630 nouveau_buffer_destroy, /* resource_destroy */
631 nouveau_buffer_transfer_map, /* transfer_map */
632 nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
633 nouveau_buffer_transfer_unmap, /* transfer_unmap */
634 u_default_transfer_inline_write /* transfer_inline_write */
635 };
636
637 struct pipe_resource *
638 nouveau_buffer_create(struct pipe_screen *pscreen,
639 const struct pipe_resource *templ)
640 {
641 struct nouveau_screen *screen = nouveau_screen(pscreen);
642 struct nv04_resource *buffer;
643 boolean ret;
644
645 buffer = CALLOC_STRUCT(nv04_resource);
646 if (!buffer)
647 return NULL;
648
649 buffer->base = *templ;
650 buffer->vtbl = &nouveau_buffer_vtbl;
651 pipe_reference_init(&buffer->base.reference, 1);
652 buffer->base.screen = pscreen;
653
654 if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
655 PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
656 buffer->domain = NOUVEAU_BO_GART;
657 } else if (buffer->base.bind &
658 (screen->vidmem_bindings & screen->sysmem_bindings)) {
659 switch (buffer->base.usage) {
660 case PIPE_USAGE_DEFAULT:
661 case PIPE_USAGE_IMMUTABLE:
662 buffer->domain = NV_VRAM_DOMAIN(screen);
663 break;
664 case PIPE_USAGE_DYNAMIC:
665 /* For most apps, we'd have to do staging transfers to avoid sync
666 * with this usage, and GART -> GART copies would be suboptimal.
667 */
668 buffer->domain = NV_VRAM_DOMAIN(screen);
669 break;
670 case PIPE_USAGE_STAGING:
671 case PIPE_USAGE_STREAM:
672 buffer->domain = NOUVEAU_BO_GART;
673 break;
674 default:
675 assert(0);
676 break;
677 }
678 } else {
679 if (buffer->base.bind & screen->vidmem_bindings)
680 buffer->domain = NV_VRAM_DOMAIN(screen);
681 else
682 if (buffer->base.bind & screen->sysmem_bindings)
683 buffer->domain = NOUVEAU_BO_GART;
684 }
685 ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);
686
687 if (ret == FALSE)
688 goto fail;
689
690 if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
691 nouveau_buffer_cache(NULL, buffer);
692
693 NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);
694
695 util_range_init(&buffer->valid_buffer_range);
696
697 return &buffer->base;
698
699 fail:
700 FREE(buffer);
701 return NULL;
702 }
703
704
705 struct pipe_resource *
706 nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
707 unsigned bytes, unsigned bind)
708 {
709 struct nv04_resource *buffer;
710
711 buffer = CALLOC_STRUCT(nv04_resource);
712 if (!buffer)
713 return NULL;
714
715 pipe_reference_init(&buffer->base.reference, 1);
716 buffer->vtbl = &nouveau_buffer_vtbl;
717 buffer->base.screen = pscreen;
718 buffer->base.format = PIPE_FORMAT_R8_UNORM;
719 buffer->base.usage = PIPE_USAGE_IMMUTABLE;
720 buffer->base.bind = bind;
721 buffer->base.width0 = bytes;
722 buffer->base.height0 = 1;
723 buffer->base.depth0 = 1;
724
725 buffer->data = ptr;
726 buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;
727
728 util_range_init(&buffer->valid_buffer_range);
729 util_range_add(&buffer->valid_buffer_range, 0, bytes);
730
731 return &buffer->base;
732 }
733
734 static INLINE boolean
735 nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
736 struct nouveau_bo *bo, unsigned offset, unsigned size)
737 {
738 if (!nouveau_buffer_malloc(buf))
739 return FALSE;
740 if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
741 return FALSE;
742 memcpy(buf->data, (uint8_t *)bo->map + offset, size);
743 return TRUE;
744 }
745
746 /* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
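/* Three cases are handled below: USER -> GART copies the malloc'ed data into
 * the new bo with the CPU; GART <-> VRAM uses a GPU copy (keeping a sysmem
 * copy when going to VRAM, in case we hit a fallback later); USER -> VRAM
 * goes through a temporary staging buffer via nouveau_transfer_write().
 */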
747 boolean
748 nouveau_buffer_migrate(struct nouveau_context *nv,
749 struct nv04_resource *buf, const unsigned new_domain)
750 {
751 struct nouveau_screen *screen = nv->screen;
752 struct nouveau_bo *bo;
753 const unsigned old_domain = buf->domain;
754 unsigned size = buf->base.width0;
755 unsigned offset;
756 int ret;
757
758 assert(new_domain != old_domain);
759
760 if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
761 if (!nouveau_buffer_allocate(screen, buf, new_domain))
762 return FALSE;
763 ret = nouveau_bo_map(buf->bo, 0, nv->client);
764 if (ret)
765 return FALSE; /* nouveau_bo_map() returns a negative errno, not a boolean */
766 memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
767 align_free(buf->data);
768 } else
769 if (old_domain != 0 && new_domain != 0) {
770 struct nouveau_mm_allocation *mm = buf->mm;
771
772 if (new_domain == NOUVEAU_BO_VRAM) {
773 /* keep a system memory copy of our data in case we hit a fallback */
774 if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
775 return FALSE;
776 if (nouveau_mesa_debug)
777 debug_printf("migrating %u KiB to VRAM\n", size / 1024);
778 }
779
780 offset = buf->offset;
781 bo = buf->bo;
782 buf->bo = NULL;
783 buf->mm = NULL;
784 nouveau_buffer_allocate(screen, buf, new_domain);
785
786 nv->copy_data(nv, buf->bo, buf->offset, new_domain,
787 bo, offset, old_domain, buf->base.width0);
788
789 nouveau_bo_ref(NULL, &bo);
790 if (mm)
791 release_allocation(&mm, screen->fence.current);
792 } else
793 if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
794 struct nouveau_transfer tx;
795 if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
796 return FALSE;
797 tx.base.resource = &buf->base;
798 tx.base.box.x = 0;
799 tx.base.box.width = buf->base.width0;
800 tx.bo = NULL;
801 tx.map = NULL;
802 if (!nouveau_transfer_staging(nv, &tx, FALSE))
803 return FALSE;
804 nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
805 nouveau_buffer_transfer_del(nv, &tx);
806 } else
807 return FALSE;
808
809 assert(buf->domain == new_domain);
810 return TRUE;
811 }
812
813 /* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
814 * We'd like to only allocate @size bytes here, but then we'd have to rebase
815 * the vertex indices ...
816 */
817 boolean
818 nouveau_user_buffer_upload(struct nouveau_context *nv,
819 struct nv04_resource *buf,
820 unsigned base, unsigned size)
821 {
822 struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
823 int ret;
824
825 assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);
826
827 buf->base.width0 = base + size;
828 if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
829 return FALSE;
830
831 ret = nouveau_bo_map(buf->bo, 0, nv->client);
832 if (ret)
833 return FALSE;
834 memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);
835
836 return TRUE;
837 }
838
839
840 /* Scratch data allocation. */
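/* The context keeps a small ring of GART BOs (NOUVEAU_MAX_SCRATCH_BUFS, each
 * nv->scratch.bo_size bytes) that is advanced as space runs out; requests too
 * large for a ring entry fall back to one-off "runout" BOs, which are released
 * once the current fence signals (see nouveau_scratch_runout_release()).
 */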
841
842 static INLINE int
843 nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo,
844 unsigned size)
845 {
846 return nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
847 4096, size, NULL, pbo);
848 }
849
850 static void
851 nouveau_scratch_unref_bos(void *d)
852 {
853 struct runout *b = d;
854 int i;
855
856 for (i = 0; i < b->nr; ++i)
857 nouveau_bo_ref(NULL, &b->bo[i]);
858
859 FREE(b);
860 }
861
862 void
863 nouveau_scratch_runout_release(struct nouveau_context *nv)
864 {
865 if (!nv->scratch.runout)
866 return;
867
868 if (!nouveau_fence_work(nv->screen->fence.current, nouveau_scratch_unref_bos,
869 nv->scratch.runout))
870 return;
871
872 nv->scratch.end = 0;
873 nv->scratch.runout = NULL;
874 }
875
876 /* Allocate an extra bo if we can't fit everything we need simultaneously.
877 * (Could happen for very large user arrays.)
878 */
879 static INLINE boolean
880 nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
881 {
882 int ret;
883 unsigned n;
884
885 if (nv->scratch.runout)
886 n = nv->scratch.runout->nr;
887 else
888 n = 0;
889 nv->scratch.runout = REALLOC(nv->scratch.runout, n == 0 ? 0 :
890 (sizeof(*nv->scratch.runout) + (n + 0) * sizeof(void *)),
891 sizeof(*nv->scratch.runout) + (n + 1) * sizeof(void *));
892 nv->scratch.runout->nr = n + 1;
893 nv->scratch.runout->bo[n] = NULL;
894
895 ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout->bo[n], size);
896 if (!ret) {
897 ret = nouveau_bo_map(nv->scratch.runout->bo[n], 0, NULL);
898 if (ret)
899 nouveau_bo_ref(NULL, &nv->scratch.runout->bo[--nv->scratch.runout->nr]);
900 }
901 if (!ret) {
902 nv->scratch.current = nv->scratch.runout->bo[n];
903 nv->scratch.offset = 0;
904 nv->scratch.end = size;
905 nv->scratch.map = nv->scratch.current->map;
906 }
907 return !ret;
908 }
909
910 /* Continue to next scratch buffer, if available (no wrapping, large enough).
911 * Allocate it if it has not yet been created.
912 */
913 static INLINE boolean
914 nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
915 {
916 struct nouveau_bo *bo;
917 int ret;
918 const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;
919
920 if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
921 return FALSE;
922 nv->scratch.id = i;
923
924 bo = nv->scratch.bo[i];
925 if (!bo) {
926 ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
927 if (ret)
928 return FALSE;
929 nv->scratch.bo[i] = bo;
930 }
931 nv->scratch.current = bo;
932 nv->scratch.offset = 0;
933 nv->scratch.end = nv->scratch.bo_size;
934
935 ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->client);
936 if (!ret)
937 nv->scratch.map = bo->map;
938 return !ret;
939 }
940
941 static boolean
942 nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
943 {
944 boolean ret;
945
946 ret = nouveau_scratch_next(nv, min_size);
947 if (!ret)
948 ret = nouveau_scratch_runout(nv, min_size);
949 return ret;
950 }
951
952
953 /* Copy data to a scratch buffer and return the address & bo the data resides in. */
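/* The returned address is biased by -base so that (address + base) points at
 * the copied bytes; callers can keep addressing the data with their original
 * byte offsets in [base, base + size) unchanged.
 */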
954 uint64_t
955 nouveau_scratch_data(struct nouveau_context *nv,
956 const void *data, unsigned base, unsigned size,
957 struct nouveau_bo **bo)
958 {
959 unsigned bgn = MAX2(base, nv->scratch.offset);
960 unsigned end = bgn + size;
961
962 if (end >= nv->scratch.end) {
963 end = base + size;
964 if (!nouveau_scratch_more(nv, end))
965 return 0;
966 bgn = base;
967 }
968 nv->scratch.offset = align(end, 4);
969
970 memcpy(nv->scratch.map + bgn, (const uint8_t *)data + base, size);
971
972 *bo = nv->scratch.current;
973 return (*bo)->offset + (bgn - base);
974 }
975
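/* Reserve size bytes of scratch space without copying anything; returns a CPU
 * pointer to it and its GPU address / bo, for callers that fill in the data
 * themselves.
 */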
976 void *
977 nouveau_scratch_get(struct nouveau_context *nv,
978 unsigned size, uint64_t *gpu_addr, struct nouveau_bo **pbo)
979 {
980 unsigned bgn = nv->scratch.offset;
981 unsigned end = nv->scratch.offset + size;
982
983 if (end >= nv->scratch.end) {
984 end = size;
985 if (!nouveau_scratch_more(nv, end))
986 return NULL;
987 bgn = 0;
988 }
989 nv->scratch.offset = align(end, 4);
990
991 *pbo = nv->scratch.current;
992 *gpu_addr = nv->scratch.current->offset + bgn;
993 return nv->scratch.map + bgn;
994 }