dc3fc812e1ae34890671d25ac206c76f22bb6dc7
[mesa.git] src/gallium/drivers/r600/r600_buffer.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson <MostAwesomeDude@gmail.com>
 */
#include <pipe/p_screen.h>
#include <util/u_format.h>
#include <util/u_math.h>
#include <util/u_inlines.h>
#include <util/u_memory.h>
#include <util/u_upload_mgr.h>
#include "state_tracker/drm_driver.h"
#include "r600_screen.h"
#include "r600_context.h"
#include "r600_resource.h"

extern struct u_resource_vtbl r600_buffer_vtbl;

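/*
 * Translate pipe bind flags into a radeon GEM domain mask.  GTT is always
 * allowed; bind points the GPU renders to or samples from also allow VRAM.
 */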
u32 r600_domain_from_usage(unsigned usage)
{
	u32 domain = RADEON_GEM_DOMAIN_GTT;

	if (usage & PIPE_BIND_RENDER_TARGET) {
		domain |= RADEON_GEM_DOMAIN_VRAM;
	}
	if (usage & PIPE_BIND_DEPTH_STENCIL) {
		domain |= RADEON_GEM_DOMAIN_VRAM;
	}
	if (usage & PIPE_BIND_SAMPLER_VIEW) {
		domain |= RADEON_GEM_DOMAIN_VRAM;
	}
	/* also need BIND_BLIT_SOURCE/DESTINATION ? */
	if (usage & PIPE_BIND_VERTEX_BUFFER) {
		domain |= RADEON_GEM_DOMAIN_GTT;
	}
	if (usage & PIPE_BIND_INDEX_BUFFER) {
		domain |= RADEON_GEM_DOMAIN_GTT;
	}
	if (usage & PIPE_BIND_CONSTANT_BUFFER) {
		domain |= RADEON_GEM_DOMAIN_VRAM;
	}

	return domain;
}

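/*
 * Create a buffer backed by a freshly allocated BO of width0 bytes.  The
 * preferred GEM domains are recorded from the template's bind flags.
 */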
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ)
{
	struct r600_screen *rscreen = r600_screen(screen);
	struct r600_resource_buffer *rbuffer;
	struct radeon_ws_bo *bo;
	/* XXX We probably want a different alignment for buffers and textures. */
	unsigned alignment = 4096;

	rbuffer = CALLOC_STRUCT(r600_resource_buffer);
	if (rbuffer == NULL)
		return NULL;

	rbuffer->magic = R600_BUFFER_MAGIC;
	rbuffer->user_buffer = NULL;
	rbuffer->num_ranges = 0;
	rbuffer->r.base.b = *templ;
	pipe_reference_init(&rbuffer->r.base.b.reference, 1);
	rbuffer->r.base.b.screen = screen;
	rbuffer->r.base.vtbl = &r600_buffer_vtbl;
	rbuffer->r.size = rbuffer->r.base.b.width0;
	rbuffer->r.domain = r600_domain_from_usage(rbuffer->r.base.b.bind);
	bo = radeon_ws_bo(rscreen->rw, rbuffer->r.base.b.width0, alignment, rbuffer->r.base.b.bind);
	if (bo == NULL) {
		FREE(rbuffer);
		return NULL;
	}
	rbuffer->r.bo = bo;
	return &rbuffer->r.base.b;
}

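/*
 * Wrap an application-owned pointer as a PIPE_BUFFER resource.  No BO is
 * allocated; the data stays in user memory until an upload is needed.
 */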
struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen,
					      void *ptr, unsigned bytes,
					      unsigned bind)
{
	struct r600_resource_buffer *rbuffer;

	rbuffer = CALLOC_STRUCT(r600_resource_buffer);
	if (rbuffer == NULL)
		return NULL;

	rbuffer->magic = R600_BUFFER_MAGIC;
	pipe_reference_init(&rbuffer->r.base.b.reference, 1);
	rbuffer->r.base.vtbl = &r600_buffer_vtbl;
	rbuffer->r.base.b.screen = screen;
	rbuffer->r.base.b.target = PIPE_BUFFER;
	rbuffer->r.base.b.format = PIPE_FORMAT_R8_UNORM;
	rbuffer->r.base.b.usage = PIPE_USAGE_IMMUTABLE;
	rbuffer->r.base.b.bind = bind;
	rbuffer->r.base.b.width0 = bytes;
	rbuffer->r.base.b.height0 = 1;
	rbuffer->r.base.b.depth0 = 1;
	rbuffer->r.base.b.flags = 0;
	rbuffer->num_ranges = 0;
	rbuffer->r.bo = NULL;
	rbuffer->user_buffer = ptr;
	return &rbuffer->r.base.b;
}

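/* Release the backing BO (if any) and free the resource wrapper. */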
static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource_buffer *rbuffer = r600_buffer(buf);
	struct r600_screen *rscreen = r600_screen(screen);

	if (rbuffer->r.bo) {
		radeon_ws_bo_reference(rscreen->rw, &rbuffer->r.bo, NULL);
	}
	FREE(rbuffer);
}

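/*
 * Map a buffer for CPU access.  User buffers are returned directly.  For a
 * DISCARD transfer that overlaps a previously written range, the BO is
 * dropped and replaced with a fresh one so the CPU does not have to wait
 * for work the GPU may still be doing with the old contents.
 */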
static void *r600_buffer_transfer_map(struct pipe_context *pipe,
				      struct pipe_transfer *transfer)
{
	struct r600_context *rctx = r600_context(pipe);
	struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource);
	struct r600_screen *rscreen = r600_screen(pipe->screen);
	int write = 0;
	uint8_t *data;
	int i;
	boolean flush = FALSE;

	if (rbuffer->user_buffer)
		return (uint8_t*)rbuffer->user_buffer + transfer->box.x;

	if (transfer->usage & PIPE_TRANSFER_DISCARD) {
		for (i = 0; i < rbuffer->num_ranges; i++) {
			if ((transfer->box.x >= rbuffer->ranges[i].start) &&
			    (transfer->box.x < rbuffer->ranges[i].end))
				flush = TRUE;

			if (flush) {
				radeon_ws_bo_reference(rscreen->rw, &rbuffer->r.bo, NULL);
				rbuffer->num_ranges = 0;
				rbuffer->r.bo = radeon_ws_bo(rscreen->rw,
							     rbuffer->r.base.b.width0, 0,
							     rbuffer->r.base.b.bind);
				break;
			}
		}
	}
	if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) {
		/* FIXME */
	}
	if (transfer->usage & PIPE_TRANSFER_WRITE) {
		write = 1;
	}
	data = radeon_ws_bo_map(rscreen->rw, rbuffer->r.bo, transfer->usage, rctx);
	if (!data)
		return NULL;

	return (uint8_t*)data + transfer->box.x;
}

static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
				       struct pipe_transfer *transfer)
{
	struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource);
	struct r600_screen *rscreen = r600_screen(pipe->screen);

	if (rbuffer->r.bo)
		radeon_ws_bo_unmap(rscreen->rw, rbuffer->r.bo);
}

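/*
 * Record a flushed (written) region of a mapped buffer.  A region that
 * overlaps an existing entry is merged into it; otherwise a new entry is
 * appended to the ranges array.
 */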
static void r600_buffer_transfer_flush_region(struct pipe_context *pipe,
					      struct pipe_transfer *transfer,
					      const struct pipe_box *box)
{
	struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource);
	unsigned i;
	unsigned offset = transfer->box.x + box->x;
	unsigned length = box->width;

	assert(box->x + box->width <= transfer->box.width);

	if (rbuffer->user_buffer)
		return;

	/* mark the range as used */
	for (i = 0; i < rbuffer->num_ranges; ++i) {
		if (offset <= rbuffer->ranges[i].end && rbuffer->ranges[i].start <= (offset + length)) {
			rbuffer->ranges[i].start = MIN2(rbuffer->ranges[i].start, offset);
			rbuffer->ranges[i].end = MAX2(rbuffer->ranges[i].end, (offset + length));
			return;
		}
	}

	rbuffer->ranges[rbuffer->num_ranges].start = offset;
	rbuffer->ranges[rbuffer->num_ranges].end = offset + length;
	rbuffer->num_ranges++;
}

unsigned r600_buffer_is_referenced_by_cs(struct pipe_context *context,
					 struct pipe_resource *buf,
					 unsigned face, unsigned level)
{
	/* FIXME */
	return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
}

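/*
 * Create a buffer resource around a BO imported from a winsys handle,
 * e.g. a buffer shared with another process or driver.
 */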
struct pipe_resource *r600_buffer_from_handle(struct pipe_screen *screen,
					      struct winsys_handle *whandle)
{
	struct radeon *rw = (struct radeon*)screen->winsys;
	struct r600_resource *rbuffer;
	struct radeon_ws_bo *bo = NULL;

	bo = radeon_ws_bo_handle(rw, whandle->handle);
	if (bo == NULL) {
		return NULL;
	}

	rbuffer = CALLOC_STRUCT(r600_resource);
	if (rbuffer == NULL) {
		radeon_ws_bo_reference(rw, &bo, NULL);
		return NULL;
	}

	pipe_reference_init(&rbuffer->base.b.reference, 1);
	rbuffer->base.b.target = PIPE_BUFFER;
	rbuffer->base.b.screen = screen;
	rbuffer->base.vtbl = &r600_buffer_vtbl;
	rbuffer->bo = bo;
	return &rbuffer->base.b;
}

struct u_resource_vtbl r600_buffer_vtbl =
{
	u_default_resource_get_handle,		/* get_handle */
	r600_buffer_destroy,			/* resource_destroy */
	r600_buffer_is_referenced_by_cs,	/* is_buffer_referenced */
	u_default_get_transfer,			/* get_transfer */
	u_default_transfer_destroy,		/* transfer_destroy */
	r600_buffer_transfer_map,		/* transfer_map */
	r600_buffer_transfer_flush_region,	/* transfer_flush_region */
	r600_buffer_transfer_unmap,		/* transfer_unmap */
	u_default_transfer_inline_write		/* transfer_inline_write */
};

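/*
 * If the draw's index buffer is a user buffer, copy the indices through the
 * index upload manager and redirect the draw to the uploaded copy.
 */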
int r600_upload_index_buffer(struct r600_context *rctx,
			     struct r600_draw *draw)
{
	struct pipe_resource *upload_buffer = NULL;
	unsigned index_offset = draw->index_buffer_offset;
	int ret = 0;

	if (r600_buffer_is_user_buffer(draw->index_buffer)) {
		ret = u_upload_buffer(rctx->upload_ib,
				      index_offset,
				      draw->count * draw->index_size,
				      draw->index_buffer,
				      &index_offset,
				      &upload_buffer);
		if (ret) {
			goto done;
		}
		draw->index_buffer_offset = index_offset;
		draw->index_buffer = upload_buffer;
	}

done:
	return ret;
}

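/*
 * Walk the vertex buffers referenced by the current vertex elements and
 * replace any user buffers with copies uploaded through the vertex-buffer
 * upload manager.
 */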
int r600_upload_user_buffers(struct r600_context *rctx)
{
	enum pipe_error ret = PIPE_OK;
	int i, nr;

	nr = rctx->vertex_elements->count;

	for (i = 0; i < nr; i++) {
		struct pipe_vertex_buffer *vb =
			&rctx->vertex_buffer[rctx->vertex_elements->elements[i].vertex_buffer_index];

		if (r600_buffer_is_user_buffer(vb->buffer)) {
			struct pipe_resource *upload_buffer = NULL;
			unsigned offset = 0; /*vb->buffer_offset * 4;*/
			unsigned size = vb->buffer->width0;
			unsigned upload_offset;
			ret = u_upload_buffer(rctx->upload_vb,
					      offset, size,
					      vb->buffer,
					      &upload_offset, &upload_buffer);
			if (ret)
				return ret;

			pipe_resource_reference(&vb->buffer, NULL);
			vb->buffer = upload_buffer;
			vb->buffer_offset = upload_offset;
		}
	}
	return ret;
}