#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"

#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "nouveau_buffer.h"
#include "nouveau_mm.h"
struct nouveau_transfer {
   struct pipe_transfer base;
};
static INLINE struct nouveau_transfer *
nouveau_transfer(struct pipe_transfer *transfer)
{
   return (struct nouveau_transfer *)transfer;
}
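
/* Allocate storage for the buffer in the requested domain: VRAM (falling
 * back to GART when the VRAM pool is exhausted), GART, or, for domain 0,
 * system memory only. Constant buffers are padded to a 0x100 byte multiple.
 * Buffers outside GART also keep a system memory shadow copy in buf->data.
 */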
static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = buf->base.width0;

   if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
      size = align(size, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
   }
   if (domain != NOUVEAU_BO_GART) {
      if (!buf->data) {
         buf->data = MALLOC(buf->base.width0);
         if (!buf->data)
            return FALSE;
      }
   }
   buf->domain = domain;
   return TRUE;
}
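
/* Defer freeing a managed sub-allocation until the given fence signals,
 * so storage is never reused while the GPU may still access it.
 */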
static INLINE void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}
INLINE void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
   nouveau_bo_ref(NULL, &buf->bo);

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   buf->domain = 0;
}
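
/* Drop the current GPU storage and allocate fresh storage in the given
 * domain. Contents are not preserved; callers copy the data themselves.
 */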
static INLINE boolean
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   return nouveau_buffer_allocate(screen, buf, domain);
}
static void
nouveau_buffer_destroy(struct pipe_screen *pscreen,
                       struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);

   nouveau_buffer_release_gpu_storage(res);

   if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      FREE(res->data);

   FREE(res);
}
/* Maybe just migrate to GART right away if we actually need to do this. */
static boolean
nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
                        unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   assert(buf->domain == NOUVEAU_BO_VRAM);

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);

   /* mapping for read synchronizes with the copy above */
   if (nouveau_bo_map_range(bounce, offset, size, NOUVEAU_BO_RD))
      return FALSE;
   memcpy(buf->data + start, bounce->map, size);
   nouveau_bo_unmap(bounce);

   buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nouveau_mm_free(mm);
   return TRUE;
}
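
/* Write back the system memory shadow copy to the GPU buffer. Sufficiently
 * small updates are emitted directly through the command stream (push_cb for
 * constbufs, push_data otherwise); larger ones go through a GART bounce
 * buffer and a GPU-side copy.
 */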
static boolean
nouveau_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf,
                      unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   /* keep small updates inline in the pushbuf; the exact cutoff is a
    * heuristic */
   if (size <= 192) {
      if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
         nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
                     start, size / 4, (const uint32_t *)(buf->data + start));
      else
         nv->push_data(nv, buf->bo, buf->offset + start, buf->domain,
                       size, buf->data + start);
      return TRUE;
   }

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nouveau_bo_map_range(bounce, offset, size,
                        NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   memcpy(bounce->map, buf->data + start, size);
   nouveau_bo_unmap(bounce);

   nv->copy_data(nv, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                 bounce, offset, NOUVEAU_BO_GART, size);

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      release_allocation(&mm, nv->screen->fence.current);

   if (start == 0 && size == buf->base.width0)
      buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
   return TRUE;
}
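
/* For VRAM buffers with pending GPU writes, reads are served from the
 * system memory shadow, so bring it up to date before mapping.
 */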
static struct pipe_transfer *
nouveau_buffer_transfer_get(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box)
{
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *xfr = CALLOC_STRUCT(nouveau_transfer);
   if (!xfr)
      return NULL;

   xfr->base.resource = resource;
   xfr->base.box.x = box->x;
   xfr->base.box.width = box->width;
   xfr->base.usage = usage;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & PIPE_TRANSFER_READ) {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING)
            nouveau_buffer_download(nv, buf, 0, buf->base.width0);
      }
   }

   return &xfr->base;
}
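
/* Write back CPU modifications when the transfer is destroyed: VRAM buffers
 * are updated from the shadow copy, and dirty vertex/index buffers trigger
 * revalidation of vertex state on the next draw.
 */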
static void
nouveau_buffer_transfer_destroy(struct pipe_context *pipe,
                                struct pipe_transfer *transfer)
{
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nouveau_context *nv = nouveau_context(pipe);

   if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
      if (buf->domain == NOUVEAU_BO_VRAM)
         nouveau_buffer_upload(nv, buf, transfer->box.x, transfer->box.width);

      if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
                                                 PIPE_BIND_INDEX_BUFFER)))
         nv->vbo_dirty = TRUE;
   }

   FREE(xfr);
}
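
/* buf->fence tracks the last GPU use of the buffer, buf->fence_wr the last
 * GPU write: reads only have to wait for outstanding writes, while writes
 * have to wait for all outstanding access.
 */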
static INLINE boolean
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence_wr))
         return FALSE;
   } else {
      if (!buf->fence)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence))
         return FALSE;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return TRUE;
}
static INLINE boolean
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ)
      return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nouveau_fence_signalled(buf->fence));
}
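
/* Map a buffer for CPU access. Buffers not resident in GART are served
 * straight from their system memory copy; GART buffers are mapped through
 * the kernel, with synchronization done explicitly via the fences above.
 */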
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags;

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (buf->mm)
      flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
   else
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
   if (ret)
      return NULL;
   map = bo->map;

   /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
    * not doing so might make future maps fail or trigger "reloc while mapped"
    * errors. For now, mappings to userspace are guaranteed to be persistent.
    */
   nouveau_bo_unmap(bo);

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}
static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nv04_resource *res = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = res->bo;
   unsigned offset = res->offset + transfer->box.x + box->x;

   /* not using non-snoop system memory yet, no need for cflush */
   if (1)
      return;

   /* XXX: maybe need to upload for VRAM buffers here */

   nouveau_screen_bo_map_flush_range(pipe->screen, bo, offset, box->width);
}
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   /* we've called nouveau_bo_unmap right after map */
}
const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,        /* get_handle */
   nouveau_buffer_destroy,               /* resource_destroy */
   nouveau_buffer_transfer_get,          /* get_transfer */
   nouveau_buffer_transfer_destroy,      /* transfer_destroy */
   nouveau_buffer_transfer_map,          /* transfer_map */
   nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,        /* transfer_unmap */
   u_default_transfer_inline_write       /* transfer_inline_write */
};
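
/* Buffers that match the screen's sysmem_bindings mask start out without
 * GPU storage; everything else is placed in GART and may be migrated to
 * VRAM later on.
 */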
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if ((buffer->base.bind & screen->sysmem_bindings) == screen->sysmem_bindings)
      ret = nouveau_buffer_allocate(screen, buffer, 0);
   else
      ret = nouveau_buffer_allocate(screen, buffer, NOUVEAU_BO_GART);

   if (!ret)
      goto fail;

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}
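
/* Wrap existing user memory in a resource without copying: the pointer is
 * adopted as the buffer's system memory storage and is never freed here.
 */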
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nouveau_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   return &buffer->base;
}
/* Like download, but for GART buffers. Merge ? */
static INLINE boolean
nouveau_buffer_data_fetch(struct nv04_resource *buf, struct nouveau_bo *bo,
                          unsigned offset, unsigned size)
{
   if (!buf->data) {
      buf->data = MALLOC(size);
      if (!buf->data)
         return FALSE;
   }
   if (nouveau_bo_map_range(bo, offset, size, NOUVEAU_BO_RD))
      return FALSE;
   memcpy(buf->data, bo->map, size);
   nouveau_bo_unmap(bo);

   return TRUE;
}
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map_range(buf->bo, buf->offset, size, NOUVEAU_BO_WR |
                                 NOUVEAU_BO_NOSYNC);
      if (ret)
         return FALSE;
      memcpy(buf->bo->map, buf->data, size);
      nouveau_bo_unmap(buf->bo);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      /* keep the old storage alive until the copy has been scheduled */
      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
         return FALSE;
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}
/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
boolean
nouveau_user_buffer_upload(struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return FALSE;

   ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
                              NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   if (ret)
      return FALSE;
   memcpy(buf->bo->map, buf->data + base, size);
   nouveau_bo_unmap(buf->bo);

   return TRUE;
}
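
/* Usage sketch (illustrative only, not part of this file): at draw time, a
 * chipset driver would migrate user buffers into a GPU domain before
 * emitting relocations, along the lines of:
 *
 *    if (!res->domain)
 *       nouveau_buffer_migrate(nv, res, NOUVEAU_BO_GART);
 */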