/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Corbin Simpson <MostAwesomeDude@gmail.com>
 */
27 #include "r600_pipe.h"
28 #include "util/u_upload_mgr.h"
29 #include "util/u_memory.h"
31 static void r600_buffer_destroy(struct pipe_screen
*screen
,
32 struct pipe_resource
*buf
)
34 struct r600_resource
*rbuffer
= r600_resource(buf
);
36 pb_reference(&rbuffer
->buf
, NULL
);
40 static void r600_set_constants_dirty_if_bound(struct r600_context
*rctx
,
41 struct r600_resource
*rbuffer
)
45 for (shader
= 0; shader
< PIPE_SHADER_TYPES
; shader
++) {
46 struct r600_constbuf_state
*state
= &rctx
->constbuf_state
[shader
];
48 uint32_t mask
= state
->enabled_mask
;
51 unsigned i
= u_bit_scan(&mask
);
52 if (state
->cb
[i
].buffer
== &rbuffer
->b
.b
) {
54 state
->dirty_mask
|= 1 << i
;
58 r600_constant_buffers_dirty(rctx
, state
);
63 static void *r600_buffer_get_transfer(struct pipe_context
*ctx
,
64 struct pipe_resource
*resource
,
67 const struct pipe_box
*box
,
68 struct pipe_transfer
**ptransfer
,
69 void *data
, struct r600_resource
*staging
)
71 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
72 struct r600_transfer
*transfer
= util_slab_alloc(&rctx
->pool_transfers
);
74 transfer
->transfer
.resource
= resource
;
75 transfer
->transfer
.level
= level
;
76 transfer
->transfer
.usage
= usage
;
77 transfer
->transfer
.box
= *box
;
78 transfer
->transfer
.stride
= 0;
79 transfer
->transfer
.layer_stride
= 0;
80 transfer
->staging
= NULL
;
82 transfer
->staging
= staging
;
83 *ptransfer
= &transfer
->transfer
;
87 static void *r600_buffer_transfer_map(struct pipe_context
*ctx
,
88 struct pipe_resource
*resource
,
91 const struct pipe_box
*box
,
92 struct pipe_transfer
**ptransfer
)
94 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
95 struct r600_resource
*rbuffer
= r600_resource(resource
);
98 assert(box
->x
+ box
->width
<= resource
->width0
);
100 if (usage
& PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
&&
101 !(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)) {
102 assert(usage
& PIPE_TRANSFER_WRITE
);
104 /* Check if mapping this buffer would cause waiting for the GPU. */
105 if (rctx
->ws
->cs_is_buffer_referenced(rctx
->cs
, rbuffer
->cs_buf
, RADEON_USAGE_READWRITE
) ||
106 rctx
->ws
->buffer_is_busy(rbuffer
->buf
, RADEON_USAGE_READWRITE
)) {
109 /* Discard the buffer. */
110 pb_reference(&rbuffer
->buf
, NULL
);
112 /* Create a new one in the same pipe_resource. */
113 /* XXX We probably want a different alignment for buffers and textures. */
114 r600_init_resource(rctx
->screen
, rbuffer
, rbuffer
->b
.b
.width0
, 4096,
115 rbuffer
->b
.b
.bind
, rbuffer
->b
.b
.usage
);
117 /* We changed the buffer, now we need to bind it where the old one was bound. */
118 /* Vertex buffers. */
119 mask
= rctx
->vertex_buffer_state
.enabled_mask
;
121 i
= u_bit_scan(&mask
);
122 if (rctx
->vertex_buffer_state
.vb
[i
].buffer
== &rbuffer
->b
.b
) {
123 rctx
->vertex_buffer_state
.dirty_mask
|= 1 << i
;
124 r600_vertex_buffers_dirty(rctx
);
127 /* Streamout buffers. */
128 for (i
= 0; i
< rctx
->num_so_targets
; i
++) {
129 if (rctx
->so_targets
[i
]->b
.buffer
== &rbuffer
->b
.b
) {
130 r600_context_streamout_end(rctx
);
131 rctx
->streamout_start
= TRUE
;
132 rctx
->streamout_append_bitmask
= ~0;
135 /* Constant buffers. */
136 r600_set_constants_dirty_if_bound(rctx
, rbuffer
);
139 #if 0 /* this is broken (see Bug 53130) */
140 else if ((usage
& PIPE_TRANSFER_DISCARD_RANGE
) &&
141 !(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
) &&
142 rctx
->screen
->has_streamout
&&
143 /* The buffer range must be aligned to 4. */
144 box
->x
% 4 == 0 && box
->width
% 4 == 0) {
145 assert(usage
& PIPE_TRANSFER_WRITE
);
147 /* Check if mapping this buffer would cause waiting for the GPU. */
148 if (rctx
->ws
->cs_is_buffer_referenced(rctx
->cs
, rbuffer
->cs_buf
, RADEON_USAGE_READWRITE
) ||
149 rctx
->ws
->buffer_is_busy(rbuffer
->buf
, RADEON_USAGE_READWRITE
)) {
150 /* Do a wait-free write-only transfer using a temporary buffer. */
151 struct r600_resource
*staging
= (struct r600_resource
*)
152 pipe_buffer_create(ctx
->screen
, PIPE_BIND_VERTEX_BUFFER
,
153 PIPE_USAGE_STAGING
, box
->width
);
154 data
= rctx
->ws
->buffer_map(staging
->cs_buf
, rctx
->cs
, PIPE_TRANSFER_WRITE
);
158 return r600_buffer_get_transfer(ctx
, resource
, level
, usage
, box
,
159 ptransfer
, data
, staging
);
164 data
= rctx
->ws
->buffer_map(rbuffer
->cs_buf
, rctx
->cs
, usage
);
170 return r600_buffer_get_transfer(ctx
, resource
, level
, usage
, box
,
171 ptransfer
, data
, NULL
);
174 static void r600_buffer_transfer_unmap(struct pipe_context
*pipe
,
175 struct pipe_transfer
*transfer
)
177 struct r600_context
*rctx
= (struct r600_context
*)pipe
;
178 struct r600_transfer
*rtransfer
= (struct r600_transfer
*)transfer
;
180 if (rtransfer
->staging
) {
182 u_box_1d(0, transfer
->box
.width
, &box
);
184 /* Copy the staging buffer into the original one. */
185 r600_copy_buffer(pipe
, transfer
->resource
, transfer
->box
.x
,
186 &rtransfer
->staging
->b
.b
, &box
);
187 pipe_resource_reference((struct pipe_resource
**)&rtransfer
->staging
, NULL
);
189 util_slab_free(&rctx
->pool_transfers
, transfer
);
192 static const struct u_resource_vtbl r600_buffer_vtbl
=
194 u_default_resource_get_handle
, /* get_handle */
195 r600_buffer_destroy
, /* resource_destroy */
196 r600_buffer_transfer_map
, /* transfer_map */
197 NULL
, /* transfer_flush_region */
198 r600_buffer_transfer_unmap
, /* transfer_unmap */
199 NULL
/* transfer_inline_write */
202 bool r600_init_resource(struct r600_screen
*rscreen
,
203 struct r600_resource
*res
,
204 unsigned size
, unsigned alignment
,
205 unsigned bind
, unsigned usage
)
207 uint32_t initial_domain
, domains
;
209 /* Staging resources particpate in transfers and blits only
210 * and are used for uploads and downloads from regular
211 * resources. We generate them internally for some transfers.
213 if (usage
== PIPE_USAGE_STAGING
) {
214 domains
= RADEON_DOMAIN_GTT
;
215 initial_domain
= RADEON_DOMAIN_GTT
;
217 domains
= RADEON_DOMAIN_GTT
| RADEON_DOMAIN_VRAM
;
220 case PIPE_USAGE_DYNAMIC
:
221 case PIPE_USAGE_STREAM
:
222 case PIPE_USAGE_STAGING
:
223 initial_domain
= RADEON_DOMAIN_GTT
;
225 case PIPE_USAGE_DEFAULT
:
226 case PIPE_USAGE_STATIC
:
227 case PIPE_USAGE_IMMUTABLE
:
229 initial_domain
= RADEON_DOMAIN_VRAM
;
234 res
->buf
= rscreen
->ws
->buffer_create(rscreen
->ws
, size
, alignment
, bind
, initial_domain
);
239 res
->cs_buf
= rscreen
->ws
->buffer_get_cs_handle(res
->buf
);
240 res
->domains
= domains
;
244 struct pipe_resource
*r600_buffer_create(struct pipe_screen
*screen
,
245 const struct pipe_resource
*templ
,
248 struct r600_screen
*rscreen
= (struct r600_screen
*)screen
;
249 struct r600_resource
*rbuffer
;
251 rbuffer
= MALLOC_STRUCT(r600_resource
);
253 rbuffer
->b
.b
= *templ
;
254 pipe_reference_init(&rbuffer
->b
.b
.reference
, 1);
255 rbuffer
->b
.b
.screen
= screen
;
256 rbuffer
->b
.vtbl
= &r600_buffer_vtbl
;
258 if (!r600_init_resource(rscreen
, rbuffer
, templ
->width0
, alignment
, templ
->bind
, templ
->usage
)) {
262 return &rbuffer
->b
.b
;