r600g: add a debug flag for printing virtual addresses of resources
[mesa.git] src/gallium/drivers/r600/r600_buffer.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson <MostAwesomeDude@gmail.com>
 */
#include "r600_pipe.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/u_surface.h"

static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	util_range_destroy(&rbuffer->valid_buffer_range);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}

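/* A note on the helper below: it scans each shader stage's bound constant
 * buffers and marks any slot still referencing the given buffer as dirty,
 * so the binding is re-emitted on the next draw. The enabled_mask /
 * u_bit_scan pattern visits only slots that actually have a buffer bound. */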
static void r600_set_constants_dirty_if_bound(struct r600_context *rctx,
					      struct r600_resource *rbuffer)
{
	unsigned shader;

	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->cb[i].buffer == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_constant_buffers_dirty(rctx, state);
		}
	}
}

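/* The helper below finishes a map operation: it allocates a pipe_transfer
 * from the per-context slab, records the mapping parameters, and returns
 * the CPU pointer. "staging" is non-NULL only for the temporary-buffer
 * path; transfer_unmap uses it to copy the written data back afterwards. */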
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);

	transfer->transfer.resource = resource;
	transfer->transfer.level = level;
	transfer->transfer.usage = usage;
	transfer->transfer.box = *box;
	transfer->transfer.stride = 0;
	transfer->transfer.layer_stride = 0;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->transfer;
	return data;
}

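/* Buffer mapping. Three wait-avoidance strategies are tried, in order,
 * before falling back to a synchronizing mmap:
 *   1) a write to a never-initialized range is promoted to UNSYNCHRONIZED,
 *   2) DISCARD_WHOLE_RESOURCE reallocates the storage if the GPU is busy,
 *   3) DISCARD_RANGE redirects the write into a temporary upload buffer. */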
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

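	/* Buffer "renaming": when the whole resource may be discarded and the
	 * GPU still references the old storage, swap in fresh storage under
	 * the same pipe_resource and re-dirty every binding that used it. */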
	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			unsigned i, mask;

			/* Discard the buffer. */
			pb_reference(&rbuffer->buf, NULL);

			/* Create a new one in the same pipe_resource. */
			/* XXX We probably want a different alignment for buffers and textures. */
			r600_init_resource(rctx->screen, rbuffer, rbuffer->b.b.width0, 4096,
					   TRUE, rbuffer->b.b.usage);

			/* We changed the buffer, now we need to bind it where the old one was bound. */
			/* Vertex buffers. */
			mask = rctx->vertex_buffer_state.enabled_mask;
			while (mask) {
				i = u_bit_scan(&mask);
				if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
					rctx->vertex_buffer_state.dirty_mask |= 1 << i;
					r600_vertex_buffers_dirty(rctx);
				}
			}
			/* Streamout buffers. */
			for (i = 0; i < rctx->streamout.num_targets; i++) {
				if (rctx->streamout.targets[i]->b.buffer == &rbuffer->b.b) {
					if (rctx->streamout.begin_emitted) {
						r600_emit_streamout_end(rctx);
					}
					rctx->streamout.append_bitmask = rctx->streamout.enabled_mask;
					r600_streamout_buffers_dirty(rctx);
				}
			}
			/* Constant buffers. */
			r600_set_constants_dirty_if_bound(rctx, rbuffer);
		}
	}
	else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
		 !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
		 !(rctx->screen->debug_flags & DBG_NO_DISCARD_RANGE) &&
		 (rctx->screen->has_cp_dma ||
		  (rctx->screen->has_streamout &&
		   /* The buffer range must be aligned to 4 with streamout. */
		   box->x % 4 == 0 && box->width % 4 == 0))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(rctx->uploader, 0, box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       &offset, (struct pipe_resource**)&staging, (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, level, usage, box,
								ptransfer, data, staging, offset);
			}
		}
	}

	/* mmap and synchronize with rings */
	data = r600_buffer_mmap_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, level, usage, box,
					ptransfer, data, NULL, 0);
}

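/* Unmapping. If a staging buffer was used, its contents are flushed back
 * into the real buffer, via the DMA ring when one exists and the size and
 * both offsets are dword-aligned, otherwise via a normal buffer copy.
 * Writes are then recorded in valid_buffer_range so future maps of the
 * same range can skip synchronization. */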
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
				       struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)pipe;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset, doffset, size;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		size = transfer->box.width;
		doffset = transfer->box.x;
		soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;
		/* Copy the staging buffer into the original one. */
		if (rctx->rings.dma.cs && !(size % 4) && !(doffset % 4) && !(soffset % 4)) {
			if (rctx->screen->chip_class >= EVERGREEN) {
				evergreen_dma_copy(rctx, dst, src, doffset, soffset, size);
			} else {
				r600_dma_copy(rctx, dst, src, doffset, soffset, size);
			}
		} else {
			struct pipe_box box;

			u_box_1d(soffset, size, &box);
			r600_copy_buffer(pipe, dst, doffset, src, &box);
		}
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
	}

	if (transfer->usage & PIPE_TRANSFER_WRITE) {
		util_range_add(&rbuffer->valid_buffer_range, transfer->box.x,
			       transfer->box.x + transfer->box.width);
	}
	util_slab_free(&rctx->pool_transfers, transfer);
}

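/* u_resource vtbl for buffers. transfer_flush_region and
 * transfer_inline_write are left NULL here; presumably those paths are
 * unused for r600 buffers or handled by generic fallbacks elsewhere. */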
static const struct u_resource_vtbl r600_buffer_vtbl =
{
	u_default_resource_get_handle,		/* get_handle */
	r600_buffer_destroy,			/* resource_destroy */
	r600_buffer_transfer_map,		/* transfer_map */
	NULL,					/* transfer_flush_region */
	r600_buffer_transfer_unmap,		/* transfer_unmap */
	NULL					/* transfer_inline_write */
};

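/* Allocate the winsys buffer object backing a resource. The domain choice
 * is driven by the gallium usage hint: STAGING is kept in GTT (CPU-visible),
 * DYNAMIC/STREAM starts in GTT but may migrate to VRAM, and everything else
 * is restricted to VRAM (see the in-code comment on why GTT is deliberately
 * omitted there). */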
bool r600_init_resource(struct r600_screen *rscreen,
			struct r600_resource *res,
			unsigned size, unsigned alignment,
			bool use_reusable_pool, unsigned usage)
{
	uint32_t initial_domain, domains;

	switch(usage) {
	case PIPE_USAGE_STAGING:
		/* Staging resources participate in transfers, i.e. are used
		 * for uploads and downloads from regular resources.
		 * We generate them internally for some transfers.
		 */
		initial_domain = RADEON_DOMAIN_GTT;
		domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
	case PIPE_USAGE_STREAM:
		/* Default to GTT, but allow the memory manager to move it to VRAM. */
		initial_domain = RADEON_DOMAIN_GTT;
		domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
		break;
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_STATIC:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Don't list GTT here, because the memory manager would put some
		 * resources to GTT no matter what the initial domain is.
		 * Not listing GTT in the domains improves performance a lot. */
		initial_domain = RADEON_DOMAIN_VRAM;
		domains = RADEON_DOMAIN_VRAM;
		break;
	}

	res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
					      use_reusable_pool,
					      initial_domain);
	if (!res->buf) {
		return false;
	}

	res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
	res->domains = domains;
	util_range_set_empty(&res->valid_buffer_range);

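	/* This is the debug aid this change adds: with the DBG_VM flag set,
	 * every buffer allocation prints its GPU virtual address range.
	 * Example output with hypothetical addresses:
	 *   VM start=0x100000 end=0x101000 | Buffer 4096 bytes
	 * The flag is presumably toggled through the R600_DEBUG environment
	 * variable like the other r600g debug flags (e.g. R600_DEBUG=vm). */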
	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%llX end=0x%llX | Buffer %u bytes\n",
			r600_resource_va(&rscreen->screen, &res->b.b),
			r600_resource_va(&rscreen->screen, &res->b.b) + res->buf->size,
			res->buf->size);
	}
	return true;
}

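/* Create a buffer resource: copy the template, initialize the reference
 * count, vtbl, and valid-range tracking, then allocate the backing storage
 * through r600_init_resource. */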
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);
	/* Guard against allocation failure before touching the struct. */
	if (!rbuffer) {
		return NULL;
	}

	rbuffer->b.b = *templ;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;
	rbuffer->b.vtbl = &r600_buffer_vtbl;
	util_range_init(&rbuffer->valid_buffer_range);

	if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, TRUE, templ->usage)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}