st/mesa: Destroy buffer object's mutex.
[mesa.git] / src / mesa / state_tracker / st_cb_bufferobjects.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * Functions for pixel buffer objects and vertex/element buffer objects.
31 */
32
33
34 #include <inttypes.h> /* for PRId64 macro */
35
36 #include "main/imports.h"
37 #include "main/mtypes.h"
38 #include "main/arrayobj.h"
39 #include "main/bufferobj.h"
40
41 #include "st_context.h"
42 #include "st_cb_bufferobjects.h"
43 #include "st_debug.h"
44
45 #include "pipe/p_context.h"
46 #include "pipe/p_defines.h"
47 #include "util/u_inlines.h"
48
49
50 /**
51 * There is some duplication between mesa's bufferobjects and our
52 * bufmgr buffers. Both have an integer handle and a hashtable to
53 * lookup an opaque structure. It would be nice if the handles and
54 * internal structure where somehow shared.
55 */
56 static struct gl_buffer_object *
57 st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
58 {
59 struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object);
60
61 if (!st_obj)
62 return NULL;
63
64 _mesa_initialize_buffer_object(ctx, &st_obj->Base, name);
65
66 return &st_obj->Base;
67 }
68
69
70
71 /**
72 * Deallocate/free a vertex/pixel buffer object.
73 * Called via glDeleteBuffersARB().
74 */
75 static void
76 st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
77 {
78 struct st_buffer_object *st_obj = st_buffer_object(obj);
79
80 assert(obj->RefCount == 0);
81 _mesa_buffer_unmap_all_mappings(ctx, obj);
82
83 if (st_obj->buffer)
84 pipe_resource_reference(&st_obj->buffer, NULL);
85
86 mtx_destroy(&st_obj->Base.Mutex);
87 free(st_obj->Base.Label);
88 free(st_obj);
89 }
90
91
92
93 /**
94 * Replace data in a subrange of buffer object. If the data range
95 * specified by size + offset extends beyond the end of the buffer or
96 * if data is NULL, no copy is performed.
97 * Called via glBufferSubDataARB().
98 */
99 static void
100 st_bufferobj_subdata(struct gl_context *ctx,
101 GLintptrARB offset,
102 GLsizeiptrARB size,
103 const GLvoid * data, struct gl_buffer_object *obj)
104 {
105 struct st_buffer_object *st_obj = st_buffer_object(obj);
106
107 /* we may be called from VBO code, so double-check params here */
108 assert(offset >= 0);
109 assert(size >= 0);
110 assert(offset + size <= obj->Size);
111
112 if (!size)
113 return;
114
115 /*
116 * According to ARB_vertex_buffer_object specification, if data is null,
117 * then the contents of the buffer object's data store is undefined. We just
118 * ignore, and leave it unchanged.
119 */
120 if (!data)
121 return;
122
123 if (!st_obj->buffer) {
124 /* we probably ran out of memory during buffer allocation */
125 return;
126 }
127
128 /* Now that transfers are per-context, we don't have to figure out
129 * flushing here. Usually drivers won't need to flush in this case
130 * even if the buffer is currently referenced by hardware - they
131 * just queue the upload as dma rather than mapping the underlying
132 * buffer directly.
133 */
134 pipe_buffer_write(st_context(ctx)->pipe,
135 st_obj->buffer,
136 offset, size, data);
137 }
138
139
140 /**
141 * Called via glGetBufferSubDataARB().
142 */
143 static void
144 st_bufferobj_get_subdata(struct gl_context *ctx,
145 GLintptrARB offset,
146 GLsizeiptrARB size,
147 GLvoid * data, struct gl_buffer_object *obj)
148 {
149 struct st_buffer_object *st_obj = st_buffer_object(obj);
150
151 /* we may be called from VBO code, so double-check params here */
152 assert(offset >= 0);
153 assert(size >= 0);
154 assert(offset + size <= obj->Size);
155
156 if (!size)
157 return;
158
159 if (!st_obj->buffer) {
160 /* we probably ran out of memory during buffer allocation */
161 return;
162 }
163
164 pipe_buffer_read(st_context(ctx)->pipe, st_obj->buffer,
165 offset, size, data);
166 }
167
168
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
                  GLenum target,
                  GLsizeiptrARB size,
                  const GLvoid * data,
                  GLenum usage,
                  GLbitfield storageFlags,
                  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage, pipe_flags = 0;

   /* Fast path: when the new store has identical size/usage/storage flags
    * to the existing one and real data is supplied, reuse the existing
    * pipe resource instead of reallocating.
    */
   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && data && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      /* Just discard the old contents and write new data.
       * This should be the same as creating a new buffer, but we avoid
       * a lot of validation in Mesa.
       */
      struct pipe_box box;

      u_box_1d(0, size, &box);
      pipe->transfer_inline_write(pipe, st_obj->buffer, 0,
                                  PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                                  &box, data, 0, 0);
      return GL_TRUE;
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   /* Translate the GL buffer target into gallium bind flags. */
   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   case GL_DRAW_INDIRECT_BUFFER:
      bind = PIPE_BIND_COMMAND_ARGS_BUFFER;
      break;
   default:
      /* No specific bind flag for other targets. */
      bind = 0;
   }

   /* Set usage. */
   if (st_obj->Base.Immutable) {
      /* BufferStorage */
      if (storageFlags & GL_CLIENT_STORAGE_BIT)
         pipe_usage = PIPE_USAGE_STAGING;
      else
         pipe_usage = PIPE_USAGE_DEFAULT;
   }
   else {
      /* BufferData */
      switch (usage) {
      case GL_STATIC_DRAW:
      case GL_STATIC_COPY:
      default:
         pipe_usage = PIPE_USAGE_DEFAULT;
         break;
      case GL_DYNAMIC_DRAW:
      case GL_DYNAMIC_COPY:
         pipe_usage = PIPE_USAGE_DYNAMIC;
         break;
      case GL_STREAM_DRAW:
      case GL_STREAM_COPY:
         /* XXX: Remove this test and fall-through when we have PBO unpacking
          * acceleration. Right now, PBO unpacking is done by the CPU, so we
          * have to make sure CPU reads are fast.
          */
         if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
            pipe_usage = PIPE_USAGE_STREAM;
            break;
         }
         /* fall through */
      case GL_STATIC_READ:
      case GL_DYNAMIC_READ:
      case GL_STREAM_READ:
         pipe_usage = PIPE_USAGE_STAGING;
         break;
      }
   }

   /* Set flags. */
   if (storageFlags & GL_MAP_PERSISTENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
   if (storageFlags & GL_MAP_COHERENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;

   /* Release the old resource before (possibly) creating a new one. */
   pipe_resource_reference( &st_obj->buffer, NULL );

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bind);
   }

   if (size != 0) {
      struct pipe_screen *screen = pipe->screen;
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bind;
      buffer.usage = pipe_usage;
      buffer.flags = pipe_flags;
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         /* NOTE(review): assumes the driver provides
          * resource_from_user_memory when this target is reachable
          * (AMD_pinned_memory) -- confirm against screen caps.
          */
         st_obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         st_obj->buffer = screen->resource_create(screen, &buffer);

         if (st_obj->buffer && data)
            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      }

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }
   }

   /* BufferData may change an array or uniform buffer, need to update it */
   st->dirty.st |= ST_NEW_VERTEX_ARRAYS | ST_NEW_UNIFORM_BUFFER;

   return GL_TRUE;
}
329
330
331 /**
332 * Called via glMapBufferRange().
333 */
334 static void *
335 st_bufferobj_map_range(struct gl_context *ctx,
336 GLintptr offset, GLsizeiptr length, GLbitfield access,
337 struct gl_buffer_object *obj,
338 gl_map_buffer_index index)
339 {
340 struct pipe_context *pipe = st_context(ctx)->pipe;
341 struct st_buffer_object *st_obj = st_buffer_object(obj);
342 enum pipe_transfer_usage flags = 0x0;
343
344 if (access & GL_MAP_WRITE_BIT)
345 flags |= PIPE_TRANSFER_WRITE;
346
347 if (access & GL_MAP_READ_BIT)
348 flags |= PIPE_TRANSFER_READ;
349
350 if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
351 flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;
352
353 if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
354 flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
355 }
356 else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
357 if (offset == 0 && length == obj->Size)
358 flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
359 else
360 flags |= PIPE_TRANSFER_DISCARD_RANGE;
361 }
362
363 if (access & GL_MAP_UNSYNCHRONIZED_BIT)
364 flags |= PIPE_TRANSFER_UNSYNCHRONIZED;
365
366 if (access & GL_MAP_PERSISTENT_BIT)
367 flags |= PIPE_TRANSFER_PERSISTENT;
368
369 if (access & GL_MAP_COHERENT_BIT)
370 flags |= PIPE_TRANSFER_COHERENT;
371
372 /* ... other flags ...
373 */
374
375 if (access & MESA_MAP_NOWAIT_BIT)
376 flags |= PIPE_TRANSFER_DONTBLOCK;
377
378 assert(offset >= 0);
379 assert(length >= 0);
380 assert(offset < obj->Size);
381 assert(offset + length <= obj->Size);
382
383 obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
384 st_obj->buffer,
385 offset, length,
386 flags,
387 &st_obj->transfer[index]);
388 if (obj->Mappings[index].Pointer) {
389 obj->Mappings[index].Offset = offset;
390 obj->Mappings[index].Length = length;
391 obj->Mappings[index].AccessFlags = access;
392 }
393 else {
394 st_obj->transfer[index] = NULL;
395 }
396
397 return obj->Mappings[index].Pointer;
398 }
399
400
401 static void
402 st_bufferobj_flush_mapped_range(struct gl_context *ctx,
403 GLintptr offset, GLsizeiptr length,
404 struct gl_buffer_object *obj,
405 gl_map_buffer_index index)
406 {
407 struct pipe_context *pipe = st_context(ctx)->pipe;
408 struct st_buffer_object *st_obj = st_buffer_object(obj);
409
410 /* Subrange is relative to mapped range */
411 assert(offset >= 0);
412 assert(length >= 0);
413 assert(offset + length <= obj->Mappings[index].Length);
414 assert(obj->Mappings[index].Pointer);
415
416 if (!length)
417 return;
418
419 pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
420 obj->Mappings[index].Offset + offset,
421 length);
422 }
423
424
425 /**
426 * Called via glUnmapBufferARB().
427 */
428 static GLboolean
429 st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
430 gl_map_buffer_index index)
431 {
432 struct pipe_context *pipe = st_context(ctx)->pipe;
433 struct st_buffer_object *st_obj = st_buffer_object(obj);
434
435 if (obj->Mappings[index].Length)
436 pipe_buffer_unmap(pipe, st_obj->transfer[index]);
437
438 st_obj->transfer[index] = NULL;
439 obj->Mappings[index].Pointer = NULL;
440 obj->Mappings[index].Offset = 0;
441 obj->Mappings[index].Length = 0;
442 return GL_TRUE;
443 }
444
445
446 /**
447 * Called via glCopyBufferSubData().
448 */
449 static void
450 st_copy_buffer_subdata(struct gl_context *ctx,
451 struct gl_buffer_object *src,
452 struct gl_buffer_object *dst,
453 GLintptr readOffset, GLintptr writeOffset,
454 GLsizeiptr size)
455 {
456 struct pipe_context *pipe = st_context(ctx)->pipe;
457 struct st_buffer_object *srcObj = st_buffer_object(src);
458 struct st_buffer_object *dstObj = st_buffer_object(dst);
459 struct pipe_box box;
460
461 if (!size)
462 return;
463
464 /* buffer should not already be mapped */
465 assert(!_mesa_check_disallowed_mapping(src));
466 assert(!_mesa_check_disallowed_mapping(dst));
467
468 u_box_1d(readOffset, size, &box);
469
470 pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0,
471 srcObj->buffer, 0, &box);
472 }
473
474 /**
475 * Called via glClearBufferSubData().
476 */
477 static void
478 st_clear_buffer_subdata(struct gl_context *ctx,
479 GLintptr offset, GLsizeiptr size,
480 const GLvoid *clearValue,
481 GLsizeiptr clearValueSize,
482 struct gl_buffer_object *bufObj)
483 {
484 struct pipe_context *pipe = st_context(ctx)->pipe;
485 struct st_buffer_object *buf = st_buffer_object(bufObj);
486 static const char zeros[16] = {0};
487
488 if (!pipe->clear_buffer) {
489 _mesa_ClearBufferSubData_sw(ctx, offset, size,
490 clearValue, clearValueSize, bufObj);
491 return;
492 }
493
494 if (!clearValue)
495 clearValue = zeros;
496
497 pipe->clear_buffer(pipe, buf->buffer, offset, size,
498 clearValue, clearValueSize);
499 }
500
501
/* TODO: if buffer wasn't created with appropriate usage flags, need
 * to recreate it now and copy contents -- or possibly create a
 * gallium entrypoint to extend the usage flags and let the driver
 * decide if a copy is necessary.
 */
void
st_bufferobj_validate_usage(struct st_context *st,
                            struct st_buffer_object *obj,
                            unsigned usage)
{
   /* Intentionally a no-op for now -- see the TODO above.  Callers may
    * invoke this before using a buffer in a new role; nothing is
    * validated or converted yet.
    */
}
513
514
/**
 * Install the state tracker's buffer-object hooks into the given
 * driver function table.  Any hook not set here keeps the core Mesa
 * fallback installed below.
 */
void
st_init_bufferobject_functions(struct dd_function_table *functions)
{
   /* plug in default driver fallbacks (such as for ClearBufferSubData) */
   _mesa_init_buffer_object_functions(functions);

   functions->NewBufferObject = st_bufferobj_alloc;
   functions->DeleteBuffer = st_bufferobj_free;
   functions->BufferData = st_bufferobj_data;
   functions->BufferSubData = st_bufferobj_subdata;
   functions->GetBufferSubData = st_bufferobj_get_subdata;
   functions->MapBufferRange = st_bufferobj_map_range;
   functions->FlushMappedBufferRange = st_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = st_bufferobj_unmap;
   functions->CopyBufferSubData = st_copy_buffer_subdata;
   functions->ClearBufferSubData = st_clear_buffer_subdata;
}