nouveau: use bool instead of boolean
src/gallium/drivers/nouveau/nouveau_buffer.c

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_surface.h"

#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "nouveau_buffer.h"
#include "nouveau_mm.h"

#define NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD 192

struct nouveau_transfer {
   struct pipe_transfer base;

   uint8_t *map;
   struct nouveau_bo *bo;
   struct nouveau_mm_allocation *mm;
   uint32_t offset;
};

static INLINE struct nouveau_transfer *
nouveau_transfer(struct pipe_transfer *transfer)
{
   return (struct nouveau_transfer *)transfer;
}

static INLINE bool
nouveau_buffer_malloc(struct nv04_resource *buf)
{
   if (!buf->data)
      buf->data = align_malloc(buf->base.width0, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
   return !!buf->data;
}

/* Allocate backing storage for the buffer: a VRAM or GART suballocation for
 * domain != 0, or plain malloc'd system memory for domain == 0. A failed VRAM
 * allocation falls back to GART.
 */
static INLINE bool
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = align(buf->base.width0, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return false;
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
   } else {
      assert(domain == 0);
      if (!nouveau_buffer_malloc(buf))
         return false;
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   util_range_set_empty(&buf->valid_buffer_range);

   return true;
}

static INLINE void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}

INLINE void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
   nouveau_bo_ref(NULL, &buf->bo);

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   if (buf->domain == NOUVEAU_BO_VRAM)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0);
   if (buf->domain == NOUVEAU_BO_GART)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_sys, -(uint64_t)buf->base.width0);

   buf->domain = 0;
}

static INLINE bool
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   nouveau_fence_ref(NULL, &buf->fence);
   nouveau_fence_ref(NULL, &buf->fence_wr);

   buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;

   return nouveau_buffer_allocate(screen, buf, domain);
}

static void
nouveau_buffer_destroy(struct pipe_screen *pscreen,
                       struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);

   nouveau_buffer_release_gpu_storage(res);

   if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      align_free(res->data);

   nouveau_fence_ref(NULL, &res->fence);
   nouveau_fence_ref(NULL, &res->fence_wr);

   util_range_destroy(&res->valid_buffer_range);

   FREE(res);

   NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1);
}

/* Set up a staging area for the transfer. This is either done in "regular"
 * system memory if the driver supports push_data (nv50+) and the data is
 * small enough (and permit_pb == true), or in GART memory.
 */
static uint8_t *
nouveau_transfer_staging(struct nouveau_context *nv,
                         struct nouveau_transfer *tx, bool permit_pb)
{
   const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
   const unsigned size = align(tx->base.box.width, 4) + adj;

   if (!nv->push_data)
      permit_pb = false;

   if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) {
      tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
      if (tx->map)
         tx->map += adj;
   } else {
      tx->mm =
         nouveau_mm_allocate(nv->screen->mm_GART, size, &tx->bo, &tx->offset);
      if (tx->bo) {
         tx->offset += adj;
         if (!nouveau_bo_map(tx->bo, 0, NULL))
            tx->map = (uint8_t *)tx->bo->map + tx->offset;
      }
   }
   return tx->map;
}
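
/* Worked example of the adj/size computation above (illustrative only;
 * assuming NOUVEAU_MIN_BUFFER_MAP_ALIGN is 64 -- the real value lives in
 * nouveau_buffer.h): for a transfer starting at box.x = 67 with width 10,
 * adj = 67 & 63 = 3 and size = align(10, 4) + 3 = 15, so the staging copy
 * starts 3 bytes into the allocation and the map keeps the same
 * sub-alignment offset as the destination range in the buffer. */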

/* Copies data from the resource into the transfer's temporary GART
 * buffer. Also updates buf->data if present.
 *
 * Maybe just migrate to GART right away if we actually need to do this. */
static bool
nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
{
   struct nv04_resource *buf = nv04_resource(tx->base.resource);
   const unsigned base = tx->base.box.x;
   const unsigned size = tx->base.box.width;

   NOUVEAU_DRV_STAT(nv->screen, buf_read_bytes_staging_vid, size);

   nv->copy_data(nv, tx->bo, tx->offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + base, buf->domain, size);

   if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client))
      return false;

   if (buf->data)
      memcpy(buf->data + base, tx->map, size);

   return true;
}

/* Writes the mapped data back into the buffer: if the map pointed at the
 * system-memory cache (buf->data), refresh the staging copy from it first,
 * then upload via the GPU copy engine, the constant-buffer push path, or an
 * inline pushbuf upload, depending on how the staging area was set up.
 */
static void
nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
                       unsigned offset, unsigned size)
{
   struct nv04_resource *buf = nv04_resource(tx->base.resource);
   uint8_t *data = tx->map + offset;
   const unsigned base = tx->base.box.x + offset;
   const bool can_cb = !((base | size) & 3);

   if (buf->data)
      memcpy(data, buf->data + base, size);
   else
      buf->status |= NOUVEAU_BUFFER_STATUS_DIRTY;

   if (buf->domain == NOUVEAU_BO_VRAM)
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_vid, size);
   if (buf->domain == NOUVEAU_BO_GART)
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_sys, size);

   if (tx->bo)
      nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain,
                    tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size);
   else
   if ((buf->base.bind & PIPE_BIND_CONSTANT_BUFFER) && nv->push_cb && can_cb)
      nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
                  base, size / 4, (const uint32_t *)data);
   else
      nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data);

   nouveau_fence_ref(nv->screen->fence.current, &buf->fence);
   nouveau_fence_ref(nv->screen->fence.current, &buf->fence_wr);
}

/* Does a CPU wait for the buffer's backing data to become reliably accessible
 * for write/read by waiting on the buffer's relevant fences.
 */
static INLINE bool
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return true;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence_wr));
      if (!nouveau_fence_wait(buf->fence_wr))
         return false;
   } else {
      if (!buf->fence)
         return true;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence));
      if (!nouveau_fence_wait(buf->fence))
         return false;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return true;
}

static INLINE bool
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ)
      return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nouveau_fence_signalled(buf->fence));
}

static INLINE void
nouveau_buffer_transfer_init(struct nouveau_transfer *tx,
                             struct pipe_resource *resource,
                             const struct pipe_box *box,
                             unsigned usage)
{
   tx->base.resource = resource;
   tx->base.level = 0;
   tx->base.usage = usage;
   tx->base.box.x = box->x;
   tx->base.box.y = 0;
   tx->base.box.z = 0;
   tx->base.box.width = box->width;
   tx->base.box.height = 1;
   tx->base.box.depth = 1;
   tx->base.stride = 0;
   tx->base.layer_stride = 0;

   tx->bo = NULL;
   tx->map = NULL;
}

static INLINE void
nouveau_buffer_transfer_del(struct nouveau_context *nv,
                            struct nouveau_transfer *tx)
{
   if (tx->map) {
      if (likely(tx->bo)) {
         nouveau_bo_ref(NULL, &tx->bo);
         if (tx->mm)
            release_allocation(&tx->mm, nv->screen->fence.current);
      } else {
         align_free(tx->map -
                    (tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK));
      }
   }
}

/* Creates a cache in system memory of the buffer data. */
static bool
nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
{
   struct nouveau_transfer tx;
   bool ret;
   tx.base.resource = &buf->base;
   tx.base.box.x = 0;
   tx.base.box.width = buf->base.width0;
   tx.bo = NULL;
   tx.map = NULL;

   if (!buf->data)
      if (!nouveau_buffer_malloc(buf))
         return false;
   if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY))
      return true;
   nv->stats.buf_cache_count++;

   if (!nouveau_transfer_staging(nv, &tx, false))
      return false;

   ret = nouveau_transfer_read(nv, &tx);
   if (ret) {
      buf->status &= ~NOUVEAU_BUFFER_STATUS_DIRTY;
      memcpy(buf->data, tx.map, buf->base.width0);
   }
   nouveau_buffer_transfer_del(nv, &tx);
   return ret;
}


#define NOUVEAU_TRANSFER_DISCARD \
   (PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)

/* Checks whether it is possible to completely discard the memory backing this
 * resource. This can be useful if we would otherwise have to wait for a read
 * operation to complete on this data.
 */
static INLINE bool
nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
{
   if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;
   if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
      return false;
   if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
      return false;
   return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
}

/* Returns a pointer to a memory area representing a window into the
 * resource's data.
 *
 * This may or may not be the _actual_ memory area of the resource. However,
 * when calling nouveau_buffer_transfer_unmap, if it wasn't the actual memory
 * area, the contents of the returned map are copied over to the resource.
 *
 * The usage indicates what the caller plans to do with the map:
 *
 *   WRITE means that the user plans to write to it
 *
 *   READ means that the user plans on reading from it
 *
 *   DISCARD_WHOLE_RESOURCE means that the whole resource is going to be
 *   potentially overwritten, and even if it isn't, the parts that aren't
 *   overwritten don't need to be preserved.
 *
 *   DISCARD_RANGE means that all the data in the specified range is going to
 *   be overwritten.
 *
 * The strategy for determining what kind of memory area to return is complex,
 * see comments inside of the function.
 */
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
   if (usage & PIPE_TRANSFER_WRITE)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);

   /* If we are trying to write to an uninitialized range, the user shouldn't
    * care what was there before. So we can treat the write as if the target
    * range were being discarded. Furthermore, since we know that even if this
    * buffer is busy due to GPU activity, because the contents were
    * uninitialized, the GPU can't care what was there, and so we can treat
    * the write as being unsynchronized.
    */
   if ((usage & PIPE_TRANSFER_WRITE) &&
       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
      usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;

   if (usage & PIPE_TRANSFER_PERSISTENT)
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         /* Set up a staging area for the user to write to. It will be copied
          * back into VRAM on unmap. */
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, true);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            /* The GPU is currently writing to this buffer. Copy its current
             * contents to a staging area in the GART. This is necessary since
             * the area being mapped is not being discarded in its entirety.
             */
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, false);
            nouveau_transfer_read(nv, tx);
         } else {
            /* The buffer is currently idle. Create a staging area for writes,
             * and make sure that the cached data is up-to-date. */
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, true);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   /* At this point, buf->domain == GART */

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   /* Note that nouveau_bo_map ends up doing a nouveau_bo_wait with the
    * relevant flags. If buf->mm is set, that means this resource is part of a
    * larger slab bo that holds multiple resources. So in that case, don't
    * wait on the whole slab and instead use the logic below to return a
    * reasonable buffer for that case.
    */
   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   /* If the GPU is currently reading/writing this buffer, we shouldn't
    * interfere with its progress. So instead we either wait for the GPU to
    * complete its operation, or set up a staging area to perform our work in.
    */
   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         /* The whole range is being discarded, so it doesn't matter what was
          * there before. No need to copy anything over. */
         nouveau_transfer_staging(nv, tx, true);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         /* The returned map must reflect the buffer's current contents, so
          * copy them over from the real buffer into the staging area. */
         nouveau_transfer_staging(nv, tx, true);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}
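
/* Illustrative sketch (not part of this file): how a state tracker typically
 * reaches the map/unmap path above through the gallium context interface.
 * The helper name is an assumption for the example only; u_box_1d() comes
 * from util/u_box.h. */
#if 0
static void
example_upload_range(struct pipe_context *pipe, struct pipe_resource *res,
                     unsigned offset, const void *data, unsigned bytes)
{
   struct pipe_transfer *transfer;
   struct pipe_box box;
   void *map;

   /* Map only the range we intend to overwrite; DISCARD_RANGE lets the
    * driver hand back a staging area instead of stalling on the GPU. */
   u_box_1d(offset, bytes, &box);
   map = pipe->transfer_map(pipe, res, 0,
                            PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
                            &box, &transfer);
   if (map) {
      memcpy(map, data, bytes);
      pipe->transfer_unmap(pipe, transfer); /* writes staging data back */
   }
}
#endif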


/* Writes back the flushed sub-range of a FLUSH_EXPLICIT transfer (the box is
 * relative to the transfer) and marks that range of the buffer as valid. */
static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->map)
      nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);

   util_range_add(&buf->valid_buffer_range,
                  tx->base.box.x + box->x,
                  tx->base.box.x + box->x + box->width);
}

/* Unmap stage of the transfer. If it was a WRITE transfer and the map that
 * was returned was not the real resource's data, this needs to transfer the
 * data back to the resource.
 *
 * Also marks vbo dirty based on the buffer's binding.
 */
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
         nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = true;
      }

      util_range_add(&buf->valid_buffer_range,
                     tx->base.box.x, tx->base.box.x + tx->base.box.width);
   }

   if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}


void
nouveau_copy_buffer(struct nouveau_context *nv,
                    struct nv04_resource *dst, unsigned dstx,
                    struct nv04_resource *src, unsigned srcx, unsigned size)
{
   assert(dst->base.target == PIPE_BUFFER && src->base.target == PIPE_BUFFER);

   if (likely(dst->domain) && likely(src->domain)) {
      nv->copy_data(nv,
                    dst->bo, dst->offset + dstx, dst->domain,
                    src->bo, src->offset + srcx, src->domain, size);

      dst->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence);
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence_wr);

      src->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
      nouveau_fence_ref(nv->screen->fence.current, &src->fence);
   } else {
      struct pipe_box src_box;
      src_box.x = srcx;
      src_box.y = 0;
      src_box.z = 0;
      src_box.width = size;
      src_box.height = 1;
      src_box.depth = 1;
      util_resource_copy_region(&nv->pipe,
                                &dst->base, 0, dstx, 0, 0,
                                &src->base, 0, &src_box);
   }

   util_range_add(&dst->valid_buffer_range, dstx, dstx + size);
}


void *
nouveau_resource_map_offset(struct nouveau_context *nv,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->domain == NOUVEAU_BO_VRAM) {
      if (!res->data || (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
         nouveau_buffer_cache(nv, res);
   }
   if (res->domain != NOUVEAU_BO_GART)
      return res->data + offset;

   if (res->mm) {
      unsigned rw;
      rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
      nouveau_buffer_sync(res, rw);
      if (nouveau_bo_map(res->bo, 0, NULL))
         return NULL;
   } else {
      if (nouveau_bo_map(res->bo, flags, nv->client))
         return NULL;
   }
   return (uint8_t *)res->bo->map + res->offset + offset;
}


const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,        /* get_handle */
   nouveau_buffer_destroy,               /* resource_destroy */
   nouveau_buffer_transfer_map,          /* transfer_map */
   nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,        /* transfer_unmap */
   u_default_transfer_inline_write       /* transfer_inline_write */
};

struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   bool ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                             PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
      buffer->domain = NOUVEAU_BO_GART;
   } else if (buffer->base.bind &
              (screen->vidmem_bindings & screen->sysmem_bindings)) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_DYNAMIC:
         /* For most apps, we'd have to do staging transfers to avoid sync
          * with this usage, and GART -> GART copies would be suboptimal.
          */
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NV_VRAM_DOMAIN(screen);
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }
   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (!ret)
      goto fail;

   if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
      nouveau_buffer_cache(NULL, buffer);

   NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);

   util_range_init(&buffer->valid_buffer_range);

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}
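
/* Illustrative sketch (not part of this file): filling in a pipe_resource
 * template for a vertex buffer and letting the screen's resource_create hook
 * route it to nouveau_buffer_create. The helper name, format choice and bind
 * flags are arbitrary examples. */
#if 0
static struct pipe_resource *
example_create_vertex_buffer(struct pipe_screen *screen, unsigned bytes)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.bind = PIPE_BIND_VERTEX_BUFFER;
   templ.usage = PIPE_USAGE_DEFAULT;
   templ.width0 = bytes;
   templ.height0 = 1;
   templ.depth0 = 1;
   templ.array_size = 1;

   /* Ends up in nouveau_buffer_create() via the screen vtable. */
   return screen->resource_create(screen, &templ);
}
#endif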


struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nouveau_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   util_range_init(&buffer->valid_buffer_range);
   util_range_add(&buffer->valid_buffer_range, 0, bytes);

   return &buffer->base;
}

/* Reads the bo's contents back into the buffer's system-memory copy through
 * a CPU mapping (no staging area involved). */
static INLINE bool
nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
                          struct nouveau_bo *bo, unsigned offset, unsigned size)
{
   if (!nouveau_buffer_malloc(buf))
      return false;
   if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
      return false;
   memcpy(buf->data, (uint8_t *)bo->map + offset, size);
   return true;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
bool
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return false;
      ret = nouveau_bo_map(buf->bo, 0, nv->client);
      if (ret)
         return false;
      memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
      align_free(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
            return false;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      struct nouveau_transfer tx;
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return false;
      tx.base.resource = &buf->base;
      tx.base.box.x = 0;
      tx.base.box.width = buf->base.width0;
      tx.bo = NULL;
      tx.map = NULL;
      if (!nouveau_transfer_staging(nv, &tx, false))
         return false;
      nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
      nouveau_buffer_transfer_del(nv, &tx);
   } else
      return false;

   assert(buf->domain == new_domain);
   return true;
}

/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
bool
nouveau_user_buffer_upload(struct nouveau_context *nv,
                           struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return false;

   ret = nouveau_bo_map(buf->bo, 0, nv->client);
   if (ret)
      return false;
   memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);

   return true;
}


/* Scratch data allocation. */

static INLINE int
nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo,
                         unsigned size)
{
   return nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                         4096, size, NULL, pbo);
}

static void
nouveau_scratch_unref_bos(void *d)
{
   struct runout *b = d;
   int i;

   for (i = 0; i < b->nr; ++i)
      nouveau_bo_ref(NULL, &b->bo[i]);

   FREE(b);
}

void
nouveau_scratch_runout_release(struct nouveau_context *nv)
{
   if (!nv->scratch.runout)
      return;

   if (!nouveau_fence_work(nv->screen->fence.current, nouveau_scratch_unref_bos,
                           nv->scratch.runout))
      return;

   nv->scratch.end = 0;
   nv->scratch.runout = NULL;
}

/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
 */
static INLINE bool
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
   int ret;
   unsigned n;

   if (nv->scratch.runout)
      n = nv->scratch.runout->nr;
   else
      n = 0;
   nv->scratch.runout = REALLOC(nv->scratch.runout, n == 0 ? 0 :
                                (sizeof(*nv->scratch.runout) + (n + 0) * sizeof(void *)),
                                sizeof(*nv->scratch.runout) + (n + 1) * sizeof(void *));
   nv->scratch.runout->nr = n + 1;
   nv->scratch.runout->bo[n] = NULL;

   ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout->bo[n], size);
   if (!ret) {
      ret = nouveau_bo_map(nv->scratch.runout->bo[n], 0, NULL);
      if (ret)
         nouveau_bo_ref(NULL, &nv->scratch.runout->bo[--nv->scratch.runout->nr]);
   }
   if (!ret) {
      nv->scratch.current = nv->scratch.runout->bo[n];
      nv->scratch.offset = 0;
      nv->scratch.end = size;
      nv->scratch.map = nv->scratch.current->map;
   }
   return !ret;
}

/* Continue to next scratch buffer, if available (no wrapping, large enough).
 * Allocate it if it has not yet been created.
 */
static INLINE bool
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
   struct nouveau_bo *bo;
   int ret;
   const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;

   if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
      return false;
   nv->scratch.id = i;

   bo = nv->scratch.bo[i];
   if (!bo) {
      ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
      if (ret)
         return false;
      nv->scratch.bo[i] = bo;
   }
   nv->scratch.current = bo;
   nv->scratch.offset = 0;
   nv->scratch.end = nv->scratch.bo_size;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->client);
   if (!ret)
      nv->scratch.map = bo->map;
   return !ret;
}

static bool
nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
{
   bool ret;

   ret = nouveau_scratch_next(nv, min_size);
   if (!ret)
      ret = nouveau_scratch_runout(nv, min_size);
   return ret;
}


/* Copy data to a scratch buffer and return address & bo the data resides in. */
uint64_t
nouveau_scratch_data(struct nouveau_context *nv,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **bo)
{
   unsigned bgn = MAX2(base, nv->scratch.offset);
   unsigned end = bgn + size;

   if (end >= nv->scratch.end) {
      end = base + size;
      if (!nouveau_scratch_more(nv, end))
         return 0;
      bgn = base;
   }
   nv->scratch.offset = align(end, 4);

   memcpy(nv->scratch.map + bgn, (const uint8_t *)data + base, size);

   *bo = nv->scratch.current;
   return (*bo)->offset + (bgn - base);
}

void *
nouveau_scratch_get(struct nouveau_context *nv,
                    unsigned size, uint64_t *gpu_addr, struct nouveau_bo **pbo)
{
   unsigned bgn = nv->scratch.offset;
   unsigned end = nv->scratch.offset + size;

   if (end >= nv->scratch.end) {
      end = size;
      if (!nouveau_scratch_more(nv, end))
         return NULL;
      bgn = 0;
   }
   nv->scratch.offset = align(end, 4);

   *pbo = nv->scratch.current;
   *gpu_addr = nv->scratch.current->offset + bgn;
   return nv->scratch.map + bgn;
}
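
/* Illustrative sketch (not part of this file): how a draw path might push a
 * user vertex array through the scratch allocator above. The helper name and
 * the stride/count parameters are assumptions for the example only. */
#if 0
static bool
example_upload_user_vertices(struct nouveau_context *nv, const void *verts,
                             unsigned stride, unsigned count,
                             struct nouveau_bo **bo, uint64_t *address)
{
   /* Copy the whole array starting at offset 0; the returned address points
    * at the data inside the current scratch bo (0 means allocation failed). */
   *address = nouveau_scratch_data(nv, verts, 0, stride * count, bo);
   return *address != 0;
}
#endif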