/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
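
/* Return TRUE if the buffer is referenced with the given usage by the GFX
 * command stream or, if present, the DMA command stream. */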
boolean r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
					struct radeon_winsys_cs_handle *buf,
					enum radeon_bo_usage usage)
{
	if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) {
		return TRUE;
	}
	if (ctx->rings.dma.cs &&
	    ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) {
		return TRUE;
	}
	return FALSE;
}
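
/* Map a buffer without breaking CPU-GPU ordering: flush any command stream
 * that still references the buffer and wait until the GPU is done with it,
 * unless the caller asked for an unsynchronized or non-blocking map.
 * Returns NULL if PIPE_TRANSFER_DONTBLOCK is set and the map would have
 * stalled. */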
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				      struct r600_resource *resource,
				      unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (ctx->rings.gfx.cs->cdw != ctx->initial_gfx_cs_size &&
	    ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs,
					     resource->cs_buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
			return NULL;
		} else {
			ctx->rings.gfx.flush(ctx, 0);
			busy = true;
		}
	}
	if (ctx->rings.dma.cs &&
	    ctx->rings.dma.cs->cdw &&
	    ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs,
					     resource->cs_buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
			return NULL;
		} else {
			ctx->rings.dma.flush(ctx, 0);
			busy = true;
		}
	}

	if (busy || ctx->ws->buffer_is_busy(resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(ctx->rings.gfx.cs);
			if (ctx->rings.dma.cs)
				ctx->ws->cs_sync_flush(ctx->rings.dma.cs);
		}
	}

	/* Setting the CS to NULL will prevent doing the checks we have already
	 * done above. */
	return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
}
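
/* Allocate the winsys buffer backing a resource, choosing its memory domain
 * (GTT or VRAM) from the gallium usage and flags. */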
bool r600_init_resource(struct r600_common_screen *rscreen,
			struct r600_resource *res,
			unsigned size, unsigned alignment,
			bool use_reusable_pool)
{
	struct r600_texture *rtex = (struct r600_texture*)res;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STAGING:
	case PIPE_USAGE_DYNAMIC:
	case PIPE_USAGE_STREAM:
		/* Transfers are likely to occur more often with these resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		break;
	}

	/* Use GTT for all persistent mappings, because they are
	 * always cached and coherent. */
	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		res->domains = RADEON_DOMAIN_GTT;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if (res->b.b.target != PIPE_BUFFER &&
	    rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		res->domains = RADEON_DOMAIN_VRAM;
	}

	/* Allocate the resource. */
	res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
					      use_reusable_pool,
					      res->domains);
	if (!res->buf) {
		return false;
	}

	res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
	util_range_set_empty(&res->valid_buffer_range);

	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %u bytes\n",
			r600_resource_va(&rscreen->b, &res->b.b),
			r600_resource_va(&rscreen->b, &res->b.b) + res->buf->size,
			res->buf->size);
	}
	return true;
}
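
/* Destroy a buffer resource: drop the winsys buffer reference and free the
 * container. */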
static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	util_range_destroy(&rbuffer->valid_buffer_range);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}
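
/* Allocate a pipe_transfer object from the per-context slab and fill it in.
 * "staging" is non-NULL when the mapping goes through a temporary upload
 * buffer; "offset" is the start of the mapped range within that buffer. */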
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);

	transfer->transfer.resource = resource;
	transfer->transfer.level = level;
	transfer->transfer.usage = usage;
	transfer->transfer.box = *box;
	transfer->transfer.stride = 0;
	transfer->transfer.layer_stride = 0;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->transfer;
	return data;
}
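
/* Map a range of a buffer for CPU access. Fast paths that avoid a GPU stall:
 * an uninitialized range is mapped unsynchronized; discarding the whole
 * buffer replaces its storage (buffer invalidation); discarding only a range
 * writes through a temporary staging buffer that is copied back at unmap
 * time. Otherwise, fall back to a synchronized map. */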
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
		}
		/* At this point, the buffer is always idle. */
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}
	else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
		 !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
		 !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
		 (rscreen->has_cp_dma ||
		  (rscreen->has_streamout &&
		   /* The buffer range must be aligned to 4 with streamout. */
		   box->x % 4 == 0 && box->width % 4 == 0))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(rctx->uploader, 0, box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       &offset, (struct pipe_resource**)&staging, (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, level, usage, box,
								ptransfer, data, staging, offset);
			} else {
				return NULL; /* error, shouldn't occur */
			}
		}
		/* At this point, the buffer is always idle (we checked it above). */
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, level, usage, box,
					ptransfer, data, NULL, 0);
}
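
/* Finish a buffer mapping. If a staging buffer was used, copy it back into
 * the real buffer, and record the written range as valid buffer content. */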
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset, doffset, size;
		struct pipe_box box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		size = transfer->box.width;
		doffset = transfer->box.x;
		soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, size, &box);

		/* Copy the staging buffer into the original one. */
		if (!(size % 4) && !(doffset % 4) && !(soffset % 4) &&
		    rctx->dma_copy(ctx, dst, 0, doffset, 0, 0, src, 0, &box)) {
			/* DONE. */
		} else {
			ctx->resource_copy_region(ctx, dst, 0, doffset, 0, 0, src, 0, &box);
		}
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
	}

	if (transfer->usage & PIPE_TRANSFER_WRITE) {
		util_range_add(&rbuffer->valid_buffer_range, transfer->box.x,
			       transfer->box.x + transfer->box.width);
	}
	util_slab_free(&rctx->pool_transfers, transfer);
}
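
/* Buffer entry points plugged into the shared u_resource dispatch table;
 * entries that buffers don't need are left NULL. */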
static const struct u_resource_vtbl r600_buffer_vtbl =
{
	NULL,				/* get_handle */
	r600_buffer_destroy,		/* resource_destroy */
	r600_buffer_transfer_map,	/* transfer_map */
	NULL,				/* transfer_flush_region */
	r600_buffer_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};
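
/* Create a buffer resource: initialize the gallium bookkeeping, then allocate
 * the winsys storage from the reusable pool via r600_init_resource(). */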
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;
	rbuffer->b.vtbl = &r600_buffer_vtbl;
	util_range_init(&rbuffer->valid_buffer_range);

	if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, TRUE)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}