#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"

#include "nouveau/nouveau_screen.h"
#include "nouveau/nouveau_winsys.h"

#include "nvc0_context.h"
#include "nvc0_resource.h"

struct nvc0_transfer {
   struct pipe_transfer base;
};

static INLINE struct nvc0_transfer *
nvc0_transfer(struct pipe_transfer *transfer)
{
   return (struct nvc0_transfer *)transfer;
}
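
/* Back a buffer with real storage: a VRAM suballocation (falling back to
 * GART if VRAM allocation fails), a GART suballocation, or, for domain 0,
 * only a system memory shadow copy. */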
static INLINE boolean
nvc0_buffer_allocate(struct nvc0_screen *screen, struct nvc0_resource *buf,
                     unsigned domain)
{
   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nvc0_mm_allocate(screen->mm_VRAM, buf->base.width0, &buf->bo,
                                 &buf->offset);
      if (!buf->bo)
         return nvc0_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nvc0_mm_allocate(screen->mm_GART, buf->base.width0, &buf->bo,
                                 &buf->offset);
      if (!buf->bo)
         return FALSE;
   }
   if (domain != NOUVEAU_BO_GART) {
      if (!buf->data) {
         buf->data = MALLOC(buf->base.width0);
         if (!buf->data)
            return FALSE;
      }
   }
   buf->domain = domain;
   return TRUE;
}
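
/* Release a suballocation safely: if an unsignalled fence still guards the
 * storage, schedule the release for when that fence signals; otherwise free
 * it immediately. */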
static INLINE void
release_allocation(struct nvc0_mm_allocation **mm, struct nvc0_fence *fence)
{
   if (fence && fence->state != NVC0_FENCE_STATE_SIGNALLED) {
      nvc0_fence_sched_release(fence, *mm);
   } else {
      nvc0_mm_free(*mm);
   }
   (*mm) = NULL;
}
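
/* Throw away the current backing storage and allocate anew in the given
 * domain; the previous suballocation is released via its fence. */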
static INLINE boolean
nvc0_buffer_reallocate(struct nvc0_screen *screen, struct nvc0_resource *buf,
                       unsigned domain)
{
   nouveau_bo_ref(NULL, &buf->bo);

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   return nvc0_buffer_allocate(screen, buf, domain);
}
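
/* A user buffer's data pointer refers to client-owned memory, so it must
 * not be freed on destruction; only driver-owned shadow copies are. */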
static void
nvc0_buffer_destroy(struct pipe_screen *pscreen,
                    struct pipe_resource *presource)
{
   struct nvc0_resource *res = nvc0_resource(presource);

   nouveau_bo_ref(NULL, &res->bo);

   if (res->mm)
      release_allocation(&res->mm, res->fence);

   if (res->data && !(res->status & NVC0_BUFFER_STATUS_USER_MEMORY))
      FREE(res->data);

   FREE(res);
}

/* Maybe just migrate to GART right away if we actually need to do this. */
static boolean
nvc0_buffer_download(struct nvc0_context *nvc0, struct nvc0_resource *buf,
                     unsigned start, unsigned size)
{
   struct nvc0_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   assert(buf->domain == NOUVEAU_BO_VRAM);

   /* stage the read-back in GART: m2mf copies VRAM -> bounce, the CPU
    * then reads the bounce buffer into the system memory shadow */
   mm = nvc0_mm_allocate(nvc0->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nvc0_m2mf_copy_linear(nvc0, bounce, offset, NOUVEAU_BO_GART,
                         buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                         size);

   if (nouveau_bo_map_range(bounce, offset, size, NOUVEAU_BO_RD))
      return FALSE;
   memcpy(buf->data + start, bounce->map, size);
   nouveau_bo_unmap(bounce);

   buf->status &= ~NVC0_BUFFER_STATUS_DIRTY;

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nvc0_mm_free(mm);
   return TRUE;
}
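
/* Upload the system memory shadow into the buffer's VRAM/GART storage.
 * Small uploads go through the command stream directly; larger ones are
 * staged in a GART bounce buffer and copied by m2mf. */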
static boolean
nvc0_buffer_upload(struct nvc0_context *nvc0, struct nvc0_resource *buf,
                   unsigned start, unsigned size)
{
   struct nvc0_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   if (size <= 192) {
      nvc0_m2mf_push_linear(nvc0, buf->bo, buf->domain, buf->offset + start,
                            size, buf->data + start);
      return TRUE;
   }

   mm = nvc0_mm_allocate(nvc0->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nouveau_bo_map_range(bounce, offset, size,
                        NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   memcpy(bounce->map, buf->data + start, size);
   nouveau_bo_unmap(bounce);

   nvc0_m2mf_copy_linear(nvc0, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                         bounce, offset, NOUVEAU_BO_GART, size);

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      release_allocation(&mm, nvc0->screen->fence.current);

   if (start == 0 && size == buf->base.width0)
      buf->status &= ~NVC0_BUFFER_STATUS_DIRTY;
   return TRUE;
}
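
/* Create a transfer object for a buffer range. If a VRAM buffer's shadow
 * copy is stale (DIRTY), read the whole buffer back first so CPU reads see
 * current data. */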
static struct pipe_transfer *
nvc0_buffer_transfer_get(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box)
{
   struct nvc0_resource *buf = nvc0_resource(resource);
   struct nvc0_transfer *xfr = CALLOC_STRUCT(nvc0_transfer);
   if (!xfr)
      return NULL;

   xfr->base.resource = resource;
   xfr->base.box.x = box->x;
   xfr->base.box.width = box->width;
   xfr->base.usage = usage;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & PIPE_TRANSFER_READ) {
         if (buf->status & NVC0_BUFFER_STATUS_DIRTY)
            nvc0_buffer_download(nvc0_context(pipe), buf, 0, buf->base.width0);
      }
   }

   return &xfr->base;
}
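
/* On transfer destruction, flush written data back to VRAM buffers and flag
 * vertex/index buffers so the draw path revalidates them. */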
static void
nvc0_buffer_transfer_destroy(struct pipe_context *pipe,
                             struct pipe_transfer *transfer)
{
   struct nvc0_resource *buf = nvc0_resource(transfer->resource);
   struct nvc0_transfer *xfr = nvc0_transfer(transfer);

   if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
      /* writing is worse than reading for the placement score */
      nvc0_buffer_adjust_score(nvc0_context(pipe), buf, -5000);

      if (buf->domain == NOUVEAU_BO_VRAM) {
         nvc0_buffer_upload(nvc0_context(pipe), buf,
                            transfer->box.x, transfer->box.width);
      }

      if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
                                                 PIPE_BIND_INDEX_BUFFER)))
         nvc0_context(pipe)->vbo_dirty = TRUE;
   }

   FREE(xfr);
}
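
/* Stall until the buffer is idle: for reads, wait only on the last write
 * fence; for writes, wait until all queued accesses have completed. */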
static INLINE boolean
nvc0_buffer_sync(struct nvc0_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return TRUE;
      if (!nvc0_fence_wait(buf->fence_wr))
         return FALSE;
   } else {
      if (!buf->fence)
         return TRUE;
      if (!nvc0_fence_wait(buf->fence))
         return FALSE;

      nvc0_fence_reference(&buf->fence, NULL);
   }
   nvc0_fence_reference(&buf->fence_wr, NULL);

   return TRUE;
}

static INLINE boolean
nvc0_buffer_busy(struct nvc0_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ)
      return (buf->fence_wr && !nvc0_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nvc0_fence_signalled(buf->fence));
}
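
/* Map a buffer for CPU access. Non-GART buffers are served straight from
 * their system memory shadow; GART buffers are mapped through the kernel,
 * with synchronization handled explicitly via fences below. */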
static void *
nvc0_buffer_transfer_map(struct pipe_context *pipe,
                         struct pipe_transfer *transfer)
{
   struct nvc0_transfer *xfr = nvc0_transfer(transfer);
   struct nvc0_resource *buf = nvc0_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags;

   nvc0_buffer_adjust_score(nvc0_context(pipe), buf, -250);

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (buf->mm)
      flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
   else
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
   if (ret)
      return NULL;
   map = bo->map;

   /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
    * not doing so might make future maps fail or trigger "reloc while mapped"
    * errors. For now, mappings to userspace are guaranteed to be persistent.
    */
   nouveau_bo_unmap(bo);

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nvc0_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nvc0_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}

static void
nvc0_buffer_transfer_flush_region(struct pipe_context *pipe,
                                  struct pipe_transfer *transfer,
                                  const struct pipe_box *box)
{
   struct nvc0_resource *res = nvc0_resource(transfer->resource);
   struct nouveau_bo *bo = res->bo;
   unsigned offset = res->offset + transfer->box.x + box->x;

   /* not using non-snoop system memory yet, no need for cflush */

   /* XXX: maybe need to upload for VRAM buffers here */

   nouveau_screen_bo_map_flush_range(pipe->screen, bo, offset, box->width);
}

static void
nvc0_buffer_transfer_unmap(struct pipe_context *pipe,
                           struct pipe_transfer *transfer)
{
   /* we've called nouveau_bo_unmap right after map */
}
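
/* Dispatch table for buffer resources, plugged into the generic u_resource
 * helpers. */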
const struct u_resource_vtbl nvc0_buffer_vtbl =
{
   u_default_resource_get_handle,     /* get_handle */
   nvc0_buffer_destroy,               /* resource_destroy */
   NULL,                              /* is_resource_referenced */
   nvc0_buffer_transfer_get,          /* get_transfer */
   nvc0_buffer_transfer_destroy,      /* transfer_destroy */
   nvc0_buffer_transfer_map,          /* transfer_map */
   nvc0_buffer_transfer_flush_region, /* transfer_flush_region */
   nvc0_buffer_transfer_unmap,        /* transfer_unmap */
   u_default_transfer_inline_write    /* transfer_inline_write */
};
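
/* Constant buffers are created with domain 0 (system memory shadow only),
 * presumably so placement can be deferred until first use; all other
 * buffers start out in GART. */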
struct pipe_resource *
nvc0_buffer_create(struct pipe_screen *pscreen,
                   const struct pipe_resource *templ)
{
   struct nvc0_screen *screen = nvc0_screen(pscreen);
   struct nvc0_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nvc0_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nvc0_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.bind & PIPE_BIND_CONSTANT_BUFFER)
      ret = nvc0_buffer_allocate(screen, buffer, 0);
   else
      ret = nvc0_buffer_allocate(screen, buffer, NOUVEAU_BO_GART);

   if (ret == FALSE)
      goto fail;

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}
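
/* Wrap client-owned memory in a resource without copying: the pointer is
 * adopted as the data shadow and the buffer is marked as user memory. */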
struct pipe_resource *
nvc0_user_buffer_create(struct pipe_screen *pscreen,
                        void *ptr,
                        unsigned bytes,
                        unsigned bind)
{
   struct nvc0_resource *buffer;

   buffer = CALLOC_STRUCT(nvc0_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nvc0_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NVC0_BUFFER_STATUS_USER_MEMORY;

   return &buffer->base;
}

/* Like download, but for GART buffers. Merge ? */
static INLINE boolean
nvc0_buffer_data_fetch(struct nvc0_resource *buf,
                       struct nouveau_bo *bo, unsigned offset, unsigned size)
{
   if (!buf->data) {
      buf->data = MALLOC(size);
      if (!buf->data)
         return FALSE;
   }
   if (nouveau_bo_map_range(bo, offset, size, NOUVEAU_BO_RD))
      return FALSE;
   memcpy(buf->data, bo->map, size);
   nouveau_bo_unmap(bo);

   return TRUE;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nvc0_buffer_migrate(struct nvc0_context *nvc0,
                    struct nvc0_resource *buf, const unsigned new_domain)
{
   struct nvc0_screen *screen = nvc0_screen(buf->base.screen);
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nvc0_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map_range(buf->bo, buf->offset, size, NOUVEAU_BO_WR |
                                 NOUVEAU_BO_NOSYNC);
      if (ret)
         return FALSE;
      memcpy(buf->bo->map, buf->data, size);
      nouveau_bo_unmap(buf->bo);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nvc0_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nvc0_buffer_data_fetch(buf, buf->bo, buf->offset, size))
            return FALSE;
         debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      /* take ownership of the old storage, allocate the new one, then copy */
      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nvc0_buffer_allocate(screen, buf, new_domain);

      nvc0_m2mf_copy_linear(nvc0, buf->bo, buf->offset, new_domain,
                            bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      if (!nvc0_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      if (!nvc0_buffer_upload(nvc0, buf, 0, buf->base.width0))
         return FALSE;
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}

/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
boolean
nvc0_user_buffer_upload(struct nvc0_resource *buf, unsigned base, unsigned size)
{
   struct nvc0_screen *screen = nvc0_screen(buf->base.screen);
   int ret;

   assert(buf->status & NVC0_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nvc0_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return FALSE;

   ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
                              NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   if (ret)
      return FALSE;
   memcpy(buf->bo->map, buf->data + base, size);
   nouveau_bo_unmap(buf->bo);

   return TRUE;
}