/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_threaded_context.h"

#include <inttypes.h>
#include <stdio.h>

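/* Return whether the given buffer is referenced by either the GFX or the
 * DMA command stream of this context. The DMA CS is only checked when it
 * actually has packets queued. */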
bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
				     struct pb_buffer *buf,
				     enum radeon_bo_usage usage)
{
	if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
		return true;
	}
	return false;
}

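/* Map a buffer for CPU access, flushing and/or waiting for any command
 * stream that still references it first. Returns NULL instead of blocking
 * when PIPE_TRANSFER_DONTBLOCK is set and a wait would be required. */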
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				      struct r600_resource *resource,
				      unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	assert(!(resource->flags & RADEON_FLAG_SPARSE));

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->gfx.flush(ctx, 0, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->dma.flush(ctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(ctx->gfx.cs);
			if (ctx->dma.cs)
				ctx->ws->cs_sync_flush(ctx->dma.cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return ctx->ws->buffer_map(resource->buf, NULL, usage);
}

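/* Fill in the domains, flags and expected memory usage of a buffer or
 * texture based on its pipe usage, the kernel (DRM) version and debug
 * flags, before the backing storage is actually allocated. */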
void r600_init_resource_fields(struct r600_common_screen *rscreen,
			       struct r600_resource *res,
			       uint64_t size, unsigned alignment)
{
	struct r600_texture *rtex = (struct r600_texture*)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution.
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40) {
			res->domains = RADEON_DOMAIN_GTT;
			res->flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		res->flags |= RADEON_FLAG_CPU_ACCESS;
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		/* Use GTT for all persistent mappings with older
		 * kernels, because they didn't always flush the HDP
		 * cache before CS execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel
		 * ensures all CPU writes finish before the GPU
		 * executes a command stream.
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40)
			res->domains = RADEON_DOMAIN_GTT;
		else if (res->domains & RADEON_DOMAIN_VRAM)
			res->flags |= RADEON_FLAG_CPU_ACCESS;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if ((res->b.b.target != PIPE_BUFFER && !rtex->surface.is_linear) ||
	    res->flags & R600_RESOURCE_FLAG_UNMAPPABLE) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags &= ~RADEON_FLAG_CPU_ACCESS;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			      RADEON_FLAG_GTT_WC;
	}

	/* If VRAM is just stolen system memory, allow both VRAM and
	 * GTT, whichever has free space. If a buffer is evicted from
	 * VRAM to GTT, it will stay there.
	 *
	 * DRM 3.6.0 has good BO move throttling, so we can allow VRAM-only
	 * placements even with a low amount of stolen VRAM.
	 */
	if (!rscreen->info.has_dedicated_vram &&
	    (rscreen->info.drm_major < 3 || rscreen->info.drm_minor < 6) &&
	    res->domains == RADEON_DOMAIN_VRAM)
		res->domains = RADEON_DOMAIN_VRAM_GTT;

	if (rscreen->debug_flags & DBG_NO_WC)
		res->flags &= ~RADEON_FLAG_GTT_WC;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;

	if (res->domains & RADEON_DOMAIN_VRAM)
		res->vram_usage = size;
	else if (res->domains & RADEON_DOMAIN_GTT)
		res->gart_usage = size;
}

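/* (Re)allocate the winsys buffer backing a resource using the fields set up
 * by r600_init_resource_fields. The old buffer, if any, is released only
 * after the new one has been stored in res->buf. */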
bool r600_alloc_resource(struct r600_common_screen *rscreen,
			 struct r600_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	/* Print debug information. */
	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}

static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	util_range_destroy(&rbuffer->valid_buffer_range);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}

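/* Discard the current storage of a buffer so the caller can treat it as
 * idle. Shared, sparse and user-pointer buffers can't be reallocated, so
 * false is returned for them. The storage is only replaced when the GPU is
 * actually still using it; otherwise the valid range is simply reset. */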
static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
		       struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->is_shared)
		return false;

	/* Sparse buffers can't be reallocated. */
	if (rbuffer->flags & RADEON_FLAG_SPARSE)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rctx->ws->buffer_is_user_ptr(rbuffer->buf))
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}

void r600_invalidate_resource(struct pipe_context *ctx,
			      struct pipe_resource *resource)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);

	/* We currently only do anything here for buffers */
	if (resource->target == PIPE_BUFFER)
		(void)r600_invalidate_buffer(rctx, rbuffer);
}

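/* Allocate and fill a pipe_transfer object for a buffer mapping, recording
 * the optional staging resource and offset so that the unmap/flush paths
 * know where to copy the data from. Returns the already mapped pointer. */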
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer;

	if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
		transfer = slab_alloc(&rctx->pool_transfers_unsync);
	else
		transfer = slab_alloc(&rctx->pool_transfers);

	transfer->transfer.resource = NULL;
	pipe_resource_reference(&transfer->transfer.resource, resource);
	transfer->transfer.level = 0;
	transfer->transfer.usage = usage;
	transfer->transfer.box = *box;
	transfer->transfer.stride = 0;
	transfer->transfer.layer_stride = 0;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->transfer;
	return data;
}

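/* Return whether the driver can copy this buffer range on the GPU: either
 * CP DMA is available, or the offsets and size are dword-aligned so the
 * copy can go through the DMA ring or streamout. */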
static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
				     unsigned dstx, unsigned srcx, unsigned size)
{
	bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);

	return rctx->screen->has_cp_dma ||
	       (dword_aligned && (rctx->dma.cs ||
				  rctx->screen->has_streamout));
}

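/* Map a buffer for the state tracker. Depending on the usage flags this
 * either maps the buffer directly (possibly unsynchronized), reallocates
 * the storage, uses a write-only staging upload, or copies the contents to
 * a cached GTT staging buffer for reads, so that stalling on the GPU or
 * reading from write-combined memory is avoided whenever possible. */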
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* From GL_AMD_pinned_memory issues:
	 *
	 *     4) Is glMapBuffer on a shared buffer guaranteed to return the
	 *        same system address which was specified at creation time?
	 *
	 *        RESOLVED: NO. The GL implementation might return a different
	 *        virtual mapping of that memory, although the same physical
	 *        page will be used.
	 *
	 * So don't ever use staging buffers.
	 */
	if (rscreen->ws->buffer_is_user_ptr(rbuffer->buf))
		usage |= PIPE_TRANSFER_PERSISTENT;

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_IGNORE_VALID_RANGE)) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !rbuffer->is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INVALIDATE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		if (r600_invalidate_buffer(rctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_TRANSFER_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
	    !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
	    ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
			 PIPE_TRANSFER_PERSISTENT)) &&
	      r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
	     (rbuffer->flags & RADEON_FLAG_SPARSE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU.
		 */
		if (rbuffer->flags & RADEON_FLAG_SPARSE ||
		    r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(ctx->stream_uploader, 0,
				       box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       rctx->screen->info.tcc_cache_line_size,
				       &offset, (struct pipe_resource**)&staging,
				       (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, usage, box,
								ptransfer, data, staging, offset);
			} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
				return NULL;
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if (((usage & PIPE_TRANSFER_READ) &&
		  !(usage & PIPE_TRANSFER_PERSISTENT) &&
		  (rbuffer->domains & RADEON_DOMAIN_VRAM ||
		   rbuffer->flags & RADEON_FLAG_GTT_WC) &&
		  r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
		 (rbuffer->flags & RADEON_FLAG_SPARSE)) {
		struct r600_resource *staging;

		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
		staging = (struct r600_resource*) pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			rctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % R600_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = r600_buffer_map_sync_with_rings(rctx, staging,
							       usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
			if (!data) {
				r600_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % R600_MAP_BUFFER_ALIGNMENT;

			return r600_buffer_get_transfer(ctx, resource, usage, box,
							ptransfer, data, staging, 0);
		} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
			return NULL;
		}
	}

	data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, usage, box,
					ptransfer, data, NULL, 0);
}

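/* Make the CPU writes of a (possibly staged) mapping visible to the GPU:
 * copy the staging buffer back into the real one if there is any, and mark
 * the written range of the buffer as valid/initialized. */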
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
					struct pipe_transfer *transfer,
					const struct pipe_box *box)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}

static void r600_buffer_flush_region(struct pipe_context *ctx,
				     struct pipe_transfer *transfer,
				     const struct pipe_box *rel_box)
{
	unsigned required_usage = PIPE_TRANSFER_WRITE |
				  PIPE_TRANSFER_FLUSH_EXPLICIT;

	if ((transfer->usage & required_usage) == required_usage) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		r600_buffer_do_flush_region(ctx, transfer, &box);
	}
}

static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	if (rtransfer->staging)
		r600_resource_reference(&rtransfer->staging, NULL);

	pipe_resource_reference(&transfer->resource, NULL);

	/* Don't use pool_transfers_unsync. We are always in the driver
	 * thread. */
	slab_free(&rctx->pool_transfers, transfer);
}

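/* Upload "size" bytes of user data at "offset" through a transient transfer
 * that uses PIPE_TRANSFER_DISCARD_RANGE, letting the mapping code pick a
 * non-stalling path where possible. */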
void r600_buffer_subdata(struct pipe_context *ctx,
			 struct pipe_resource *buffer,
			 unsigned usage, unsigned offset,
			 unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	u_box_1d(offset, size, &box);
	map = r600_buffer_transfer_map(ctx, buffer, 0,
				       PIPE_TRANSFER_WRITE |
				       PIPE_TRANSFER_DISCARD_RANGE |
				       usage,
				       &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	r600_buffer_transfer_unmap(ctx, transfer);
}

static const struct u_resource_vtbl r600_buffer_vtbl =
{
	NULL,				/* get_handle */
	r600_buffer_destroy,		/* resource_destroy */
	r600_buffer_transfer_map,	/* transfer_map */
	r600_buffer_flush_region,	/* transfer_flush_region */
	r600_buffer_transfer_unmap,	/* transfer_unmap */
};

static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
			 const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	rbuffer->b.b.next = NULL;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;
	rbuffer->b.vtbl = &r600_buffer_vtbl;
	rbuffer->buf = NULL;
	rbuffer->bind_history = 0;
	rbuffer->TC_L2_dirty = false;
	rbuffer->is_shared = false;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}

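/* Create a buffer resource: fill in the placement fields and allocate the
 * backing storage right away. Returns NULL if the allocation fails. */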
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	r600_init_resource_fields(rscreen, rbuffer, templ->width0, alignment);

	if (templ->bind & PIPE_BIND_SHARED)
		rbuffer->flags |= RADEON_FLAG_HANDLE;
	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		rbuffer->flags |= RADEON_FLAG_SPARSE;

	if (!r600_alloc_resource(rscreen, rbuffer)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}

struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned flags,
						 unsigned usage,
						 unsigned size,
						 unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return r600_buffer_create(screen, &buffer, alignment);
}

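/* Wrap an application-owned memory allocation (as used for AMD_pinned_memory
 * style user pointers) in a buffer resource. The memory stays where it is,
 * so the buffer is placed in GTT and its full range is valid from the start. */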
struct pipe_resource *
r600_buffer_from_user_memory(struct pipe_screen *screen,
			     const struct pipe_resource *templ,
			     void *user_memory)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	rbuffer->domains = RADEON_DOMAIN_GTT;
	rbuffer->flags = 0;
	util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	if (rscreen->info.has_virtual_memory)
		rbuffer->gpu_address =
			ws->buffer_get_virtual_address(rbuffer->buf);
	else
		rbuffer->gpu_address = 0;

	rbuffer->vram_usage = 0;
	rbuffer->gart_usage = templ->width0;

	return &rbuffer->b.b;
}