/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "radeonsi/si_pipe.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
#include <stdio.h>

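/* Return true if the buffer is referenced by the currently recorded gfx or
 * SDMA command stream with the given usage. Callers use this to decide
 * whether a CPU map of the buffer would require a flush and/or a wait. */
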
bool si_rings_is_buffer_referenced(struct r600_common_context *ctx,
				   struct pb_buffer *buf,
				   enum radeon_bo_usage usage)
{
	if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
		return true;
	}

	return false;
}

void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				    struct r600_resource *resource,
				    unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	assert(!(resource->flags & RADEON_FLAG_SPARSE));

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->gfx.flush(ctx, 0, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->dma.flush(ctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(ctx->gfx.cs);
			if (ctx->dma.cs)
				ctx->ws->cs_sync_flush(ctx->dma.cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return ctx->ws->buffer_map(resource->buf, NULL, usage);
}

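/* A minimal usage sketch for si_buffer_map_sync_with_rings() (hypothetical
 * caller; "dst" and "size" are illustrative only):
 *
 *    void *ptr = si_buffer_map_sync_with_rings(rctx, rbuffer,
 *                                              PIPE_TRANSFER_READ);
 *    if (ptr)
 *            memcpy(dst, ptr, size);
 *
 * The helper flushes and/or waits on the gfx and SDMA rings as needed, so a
 * non-NULL pointer is safe to read; with PIPE_TRANSFER_DONTBLOCK it may
 * return NULL instead of stalling. */
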
void si_init_resource_fields(struct si_screen *sscreen,
			     struct r600_resource *res,
			     uint64_t size, unsigned alignment)
{
	struct r600_texture *rtex = (struct r600_texture *)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;
	res->texture_handle_allocated = false;
	res->image_handle_allocated = false;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution.
		 */
		if (sscreen->info.drm_major == 2 &&
		    sscreen->info.drm_minor < 40) {
			res->domains = RADEON_DOMAIN_GTT;
			res->flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		/* Use GTT for all persistent mappings with older
		 * kernels, because they didn't always flush the HDP
		 * cache before CS execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel
		 * ensures all CPU writes finish before the GPU
		 * executes a command stream.
		 */
		if (sscreen->info.drm_major == 2 &&
		    sscreen->info.drm_minor < 40)
			res->domains = RADEON_DOMAIN_GTT;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if ((res->b.b.target != PIPE_BUFFER && !rtex->surface.is_linear) ||
	    res->b.b.flags & R600_RESOURCE_FLAG_UNMAPPABLE) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			      RADEON_FLAG_GTT_WC;
	}

	/* Displayable and shareable surfaces are not suballocated. */
	if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
		res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
	else
		res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

	if (sscreen->debug_flags & DBG(NO_WC))
		res->flags &= ~RADEON_FLAG_GTT_WC;

	if (res->b.b.flags & R600_RESOURCE_FLAG_READ_ONLY)
		res->flags |= RADEON_FLAG_READ_ONLY;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;
	res->max_forced_staging_uploads = 0;
	res->b.max_forced_staging_uploads = 0;

	if (res->domains & RADEON_DOMAIN_VRAM) {
		res->vram_usage = size;

		res->max_forced_staging_uploads =
		res->b.max_forced_staging_uploads =
			sscreen->info.has_dedicated_vram &&
			size >= sscreen->info.vram_vis_size / 4 ? 1 : 0;
	} else if (res->domains & RADEON_DOMAIN_GTT) {
		res->gart_usage = size;
	}
}

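/* Placement summary for si_init_resource_fields() above: STREAM and STAGING
 * resources go to GTT, while DYNAMIC (on recent kernels), DEFAULT and
 * IMMUTABLE resources go to VRAM with write-combined CPU access (unless
 * DBG(NO_WC) is set); tiled or explicitly unmappable resources force VRAM
 * without CPU access. */
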
bool si_alloc_resource(struct si_screen *sscreen,
		       struct r600_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (sscreen->info.has_virtual_memory)
		res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	/* Print debug information. */
	if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}

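/* Typical allocation pattern (a sketch mirroring si_buffer_create() below,
 * where "res" stands for a freshly allocated struct r600_resource):
 *
 *    si_init_resource_fields(sscreen, res, size, alignment);
 *    if (!si_alloc_resource(sscreen, res))
 *            return NULL;
 */
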
static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	threaded_resource_deinit(buf);
	util_range_destroy(&rbuffer->valid_buffer_range);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}

static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
		       struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->b.is_shared)
		return false;

	/* Sparse buffers can't be reallocated. */
	if (rbuffer->flags & RADEON_FLAG_SPARSE)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rbuffer->b.is_user_ptr)
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}

/* Replace the storage of dst with src. */
void si_replace_buffer_storage(struct pipe_context *ctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_resource *rdst = r600_resource(dst);
	struct r600_resource *rsrc = r600_resource(src);
	uint64_t old_gpu_address = rdst->gpu_address;

	pb_reference(&rdst->buf, rsrc->buf);
	rdst->gpu_address = rsrc->gpu_address;
	rdst->b.b.bind = rsrc->b.b.bind;
	rdst->b.max_forced_staging_uploads = rsrc->b.max_forced_staging_uploads;
	rdst->max_forced_staging_uploads = rsrc->max_forced_staging_uploads;
	rdst->flags = rsrc->flags;

	assert(rdst->vram_usage == rsrc->vram_usage);
	assert(rdst->gart_usage == rsrc->gart_usage);
	assert(rdst->bo_size == rsrc->bo_size);
	assert(rdst->bo_alignment == rsrc->bo_alignment);
	assert(rdst->domains == rsrc->domains);

	rctx->rebind_buffer(ctx, dst, old_gpu_address);
}

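/* si_replace_buffer_storage() above makes dst alias src's buffer and then
 * asks the context to rebind dst at its new GPU address; the asserts require
 * that both resources were created with an identical layout. */
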
static void si_invalidate_resource(struct pipe_context *ctx,
				   struct pipe_resource *resource)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);

	/* We currently only do anything here for buffers. */
	if (resource->target == PIPE_BUFFER)
		(void)r600_invalidate_buffer(rctx, rbuffer);
}

static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_transfer *transfer;

	if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
		transfer = slab_alloc(&rctx->pool_transfers_unsync);
	else
		transfer = slab_alloc(&rctx->pool_transfers);

	transfer->b.b.resource = NULL;
	pipe_resource_reference(&transfer->b.b.resource, resource);
	transfer->b.b.level = 0;
	transfer->b.b.usage = usage;
	transfer->b.b.box = *box;
	transfer->b.b.stride = 0;
	transfer->b.b.layer_stride = 0;
	transfer->b.staging = NULL;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->b.b;
	return data;
}

static void *r600_buffer_transfer_map(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* From GL_AMD_pinned_memory issues:
	 *
	 *     4) Is glMapBuffer on a shared buffer guaranteed to return the
	 *        same system address which was specified at creation time?
	 *
	 *        RESOLVED: NO. The GL implementation might return a different
	 *        virtual mapping of that memory, although the same physical
	 *        page will be used.
	 *
	 * So don't ever use staging buffers.
	 */
	if (rbuffer->b.is_user_ptr)
		usage |= PIPE_TRANSFER_PERSISTENT;

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !rbuffer->b.is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	/* If a buffer in VRAM is too large and the range is discarded, don't
	 * map it directly. This makes sure that the buffer stays in VRAM.
	 */
	bool force_discard_range = false;
	if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
		     PIPE_TRANSFER_DISCARD_RANGE) &&
	    !(usage & PIPE_TRANSFER_PERSISTENT) &&
	    /* Try not to decrement the counter if it's not positive. Still racy,
	     * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
	    rbuffer->max_forced_staging_uploads > 0 &&
	    p_atomic_dec_return(&rbuffer->max_forced_staging_uploads) >= 0) {
		usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
			   PIPE_TRANSFER_UNSYNCHRONIZED);
		usage |= PIPE_TRANSFER_DISCARD_RANGE;
		force_discard_range = true;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INVALIDATE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		if (r600_invalidate_buffer(rctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_TRANSFER_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
	    ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
			 PIPE_TRANSFER_PERSISTENT))) ||
	     (rbuffer->flags & RADEON_FLAG_SPARSE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU.
		 */
		if (rbuffer->flags & RADEON_FLAG_SPARSE ||
		    force_discard_range ||
		    si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(ctx->stream_uploader, 0,
				       box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       rctx->screen->info.tcc_cache_line_size,
				       &offset, (struct pipe_resource **)&staging,
				       (void **)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, usage, box,
								ptransfer, data, staging, offset);
			} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
				return NULL;
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if (((usage & PIPE_TRANSFER_READ) &&
		  !(usage & PIPE_TRANSFER_PERSISTENT) &&
		  (rbuffer->domains & RADEON_DOMAIN_VRAM ||
		   rbuffer->flags & RADEON_FLAG_GTT_WC)) ||
		 (rbuffer->flags & RADEON_FLAG_SPARSE)) {
		struct r600_resource *staging;

		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
		staging = (struct r600_resource *) pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			rctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % R600_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = si_buffer_map_sync_with_rings(rctx, staging,
							     usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
			if (!data) {
				r600_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % R600_MAP_BUFFER_ALIGNMENT;

			return r600_buffer_get_transfer(ctx, resource, usage, box,
							ptransfer, data, staging, 0);
		} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
			return NULL;
		}
	}

	data = si_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, usage, box,
					ptransfer, data, NULL, 0);
}

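/* Mapping strategy of r600_buffer_transfer_map() above, in order:
 *  - ranges that were never written are mapped unsynchronized,
 *  - whole-resource discards try to reallocate the storage (r600_invalidate_buffer),
 *  - discard-range writes on busy or sparse buffers go through a temporary upload buffer,
 *  - reads from VRAM or write-combined GTT go through a cached GTT staging copy,
 *  - everything else maps the buffer directly after syncing with the rings. */
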
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
					struct pipe_transfer *transfer,
					const struct pipe_box *box)
{
	struct r600_transfer *rtransfer = (struct r600_transfer *)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}

static void r600_buffer_flush_region(struct pipe_context *ctx,
				     struct pipe_transfer *transfer,
				     const struct pipe_box *rel_box)
{
	unsigned required_usage = PIPE_TRANSFER_WRITE |
				  PIPE_TRANSFER_FLUSH_EXPLICIT;

	if ((transfer->usage & required_usage) == required_usage) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		r600_buffer_do_flush_region(ctx, transfer, &box);
	}
}

static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer *)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	r600_resource_reference(&rtransfer->staging, NULL);
	assert(rtransfer->b.staging == NULL); /* for threaded context only */
	pipe_resource_reference(&transfer->resource, NULL);

	/* Don't use pool_transfers_unsync. We are always in the driver
	 * thread. */
	slab_free(&rctx->pool_transfers, transfer);
}

static void si_buffer_subdata(struct pipe_context *ctx,
			      struct pipe_resource *buffer,
			      unsigned usage, unsigned offset,
			      unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	u_box_1d(offset, size, &box);
	map = r600_buffer_transfer_map(ctx, buffer, 0,
				       PIPE_TRANSFER_WRITE |
				       PIPE_TRANSFER_DISCARD_RANGE |
				       usage,
				       &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	r600_buffer_transfer_unmap(ctx, transfer);
}

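/* si_buffer_subdata() above is reached through the pipe_context hook, e.g.
 * (illustrative values only):
 *
 *    pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
 *
 * and internally becomes a discard-range transfer_map + memcpy + unmap. */
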
static const struct u_resource_vtbl r600_buffer_vtbl =
{
	NULL,				/* get_handle */
	r600_buffer_destroy,		/* resource_destroy */
	r600_buffer_transfer_map,	/* transfer_map */
	r600_buffer_flush_region,	/* transfer_flush_region */
	r600_buffer_transfer_unmap,	/* transfer_unmap */
};

static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
			 const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	rbuffer->b.b.next = NULL;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;

	rbuffer->b.vtbl = &r600_buffer_vtbl;
	threaded_resource_init(&rbuffer->b.b);

	rbuffer->buf = NULL;
	rbuffer->bind_history = 0;
	rbuffer->TC_L2_dirty = false;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}

static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
					      const struct pipe_resource *templ,
					      unsigned alignment)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		rbuffer->b.b.flags |= R600_RESOURCE_FLAG_UNMAPPABLE;

	si_init_resource_fields(sscreen, rbuffer, templ->width0, alignment);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		rbuffer->flags |= RADEON_FLAG_SPARSE;

	if (!si_alloc_resource(sscreen, rbuffer)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}

struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
					       unsigned flags,
					       unsigned usage,
					       unsigned size,
					       unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return si_buffer_create(screen, &buffer, alignment);
}

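/* Example call (a sketch; the flags/usage/size/alignment parameter order
 * follows the definition above and the constants are illustrative only):
 *
 *    struct pipe_resource *buf =
 *            si_aligned_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
 *                                     4096, 256);
 */
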
static struct pipe_resource *
si_buffer_from_user_memory(struct pipe_screen *screen,
			   const struct pipe_resource *templ,
			   void *user_memory)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	rbuffer->domains = RADEON_DOMAIN_GTT;
	rbuffer->flags = 0;
	rbuffer->b.is_user_ptr = true;
	util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
	util_range_add(&rbuffer->b.valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	if (sscreen->info.has_virtual_memory)
		rbuffer->gpu_address =
			ws->buffer_get_virtual_address(rbuffer->buf);
	else
		rbuffer->gpu_address = 0;

	rbuffer->vram_usage = 0;
	rbuffer->gart_usage = templ->width0;

	return &rbuffer->b.b;
}

static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
						const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER) {
		return si_buffer_create(screen, templ, 256);
	} else {
		return si_texture_create(screen, templ);
	}
}

void si_init_screen_buffer_functions(struct si_screen *sscreen)
{
	sscreen->b.resource_create = si_resource_create;
	sscreen->b.resource_destroy = u_resource_destroy_vtbl;
	sscreen->b.resource_from_user_memory = si_buffer_from_user_memory;
}

void si_init_buffer_functions(struct si_context *sctx)
{
	sctx->b.b.invalidate_resource = si_invalidate_resource;
	sctx->b.b.transfer_map = u_transfer_map_vtbl;
	sctx->b.b.transfer_flush_region = u_transfer_flush_region_vtbl;
	sctx->b.b.transfer_unmap = u_transfer_unmap_vtbl;
	sctx->b.b.texture_subdata = u_default_texture_subdata;
	sctx->b.b.buffer_subdata = si_buffer_subdata;
}