nir: Move nir_lower_uniforms_to_ubo to compiler/nir.
[mesa.git] / src / mesa / state_tracker / st_cb_bufferobjects.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * Functions for pixel buffer objects and vertex/element buffer objects.
31 */
32
33
34 #include <inttypes.h> /* for PRId64 macro */
35
36 #include "main/errors.h"
37 #include "main/imports.h"
38 #include "main/mtypes.h"
39 #include "main/arrayobj.h"
40 #include "main/bufferobj.h"
41
42 #include "st_context.h"
43 #include "st_cb_bufferobjects.h"
44 #include "st_cb_memoryobjects.h"
45 #include "st_debug.h"
46
47 #include "pipe/p_context.h"
48 #include "pipe/p_defines.h"
49 #include "util/u_inlines.h"
50
51
52 /**
53 * There is some duplication between mesa's bufferobjects and our
54 * bufmgr buffers. Both have an integer handle and a hashtable to
55 * lookup an opaque structure. It would be nice if the handles and
56 * internal structure where somehow shared.
57 */
58 static struct gl_buffer_object *
59 st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
60 {
61 struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object);
62
63 if (!st_obj)
64 return NULL;
65
66 _mesa_initialize_buffer_object(ctx, &st_obj->Base, name);
67
68 return &st_obj->Base;
69 }
70
71
72
73 /**
74 * Deallocate/free a vertex/pixel buffer object.
75 * Called via glDeleteBuffersARB().
76 */
77 static void
78 st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
79 {
80 struct st_buffer_object *st_obj = st_buffer_object(obj);
81
82 assert(obj->RefCount == 0);
83 _mesa_buffer_unmap_all_mappings(ctx, obj);
84
85 if (st_obj->buffer)
86 pipe_resource_reference(&st_obj->buffer, NULL);
87
88 _mesa_delete_buffer_object(ctx, obj);
89 }
90
91
92
93 /**
94 * Replace data in a subrange of buffer object. If the data range
95 * specified by size + offset extends beyond the end of the buffer or
96 * if data is NULL, no copy is performed.
97 * Called via glBufferSubDataARB().
98 */
99 static void
100 st_bufferobj_subdata(struct gl_context *ctx,
101 GLintptrARB offset,
102 GLsizeiptrARB size,
103 const void * data, struct gl_buffer_object *obj)
104 {
105 struct st_buffer_object *st_obj = st_buffer_object(obj);
106
107 /* we may be called from VBO code, so double-check params here */
108 assert(offset >= 0);
109 assert(size >= 0);
110 assert(offset + size <= obj->Size);
111
112 if (!size)
113 return;
114
115 /*
116 * According to ARB_vertex_buffer_object specification, if data is null,
117 * then the contents of the buffer object's data store is undefined. We just
118 * ignore, and leave it unchanged.
119 */
120 if (!data)
121 return;
122
123 if (!st_obj->buffer) {
124 /* we probably ran out of memory during buffer allocation */
125 return;
126 }
127
128 /* Now that transfers are per-context, we don't have to figure out
129 * flushing here. Usually drivers won't need to flush in this case
130 * even if the buffer is currently referenced by hardware - they
131 * just queue the upload as dma rather than mapping the underlying
132 * buffer directly.
133 */
134 pipe_buffer_write(st_context(ctx)->pipe,
135 st_obj->buffer,
136 offset, size, data);
137 }
138
139
140 /**
141 * Called via glGetBufferSubDataARB().
142 */
143 static void
144 st_bufferobj_get_subdata(struct gl_context *ctx,
145 GLintptrARB offset,
146 GLsizeiptrARB size,
147 void * data, struct gl_buffer_object *obj)
148 {
149 struct st_buffer_object *st_obj = st_buffer_object(obj);
150
151 /* we may be called from VBO code, so double-check params here */
152 assert(offset >= 0);
153 assert(size >= 0);
154 assert(offset + size <= obj->Size);
155
156 if (!size)
157 return;
158
159 if (!st_obj->buffer) {
160 /* we probably ran out of memory during buffer allocation */
161 return;
162 }
163
164 pipe_buffer_read(st_context(ctx)->pipe, st_obj->buffer,
165 offset, size, data);
166 }
167
168
169 /**
170 * Return bitmask of PIPE_BIND_x flags corresponding a GL buffer target.
171 */
172 static unsigned
173 buffer_target_to_bind_flags(GLenum target)
174 {
175 switch (target) {
176 case GL_PIXEL_PACK_BUFFER_ARB:
177 case GL_PIXEL_UNPACK_BUFFER_ARB:
178 return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
179 case GL_ARRAY_BUFFER_ARB:
180 return PIPE_BIND_VERTEX_BUFFER;
181 case GL_ELEMENT_ARRAY_BUFFER_ARB:
182 return PIPE_BIND_INDEX_BUFFER;
183 case GL_TEXTURE_BUFFER:
184 return PIPE_BIND_SAMPLER_VIEW;
185 case GL_TRANSFORM_FEEDBACK_BUFFER:
186 return PIPE_BIND_STREAM_OUTPUT;
187 case GL_UNIFORM_BUFFER:
188 return PIPE_BIND_CONSTANT_BUFFER;
189 case GL_DRAW_INDIRECT_BUFFER:
190 case GL_PARAMETER_BUFFER_ARB:
191 return PIPE_BIND_COMMAND_ARGS_BUFFER;
192 case GL_ATOMIC_COUNTER_BUFFER:
193 case GL_SHADER_STORAGE_BUFFER:
194 return PIPE_BIND_SHADER_BUFFER;
195 case GL_QUERY_BUFFER:
196 return PIPE_BIND_QUERY_BUFFER;
197 default:
198 return 0;
199 }
200 }
201
202
203 /**
204 * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
205 */
206 static unsigned
207 storage_flags_to_buffer_flags(GLbitfield storageFlags)
208 {
209 unsigned flags = 0;
210 if (storageFlags & GL_MAP_PERSISTENT_BIT)
211 flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
212 if (storageFlags & GL_MAP_COHERENT_BIT)
213 flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
214 if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
215 flags |= PIPE_RESOURCE_FLAG_SPARSE;
216 return flags;
217 }
218
219
220 /**
221 * From a buffer object's target, immutability flag, storage flags and
222 * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
223 * STREAM, etc).
224 */
225 static const enum pipe_resource_usage
226 buffer_usage(GLenum target, GLboolean immutable,
227 GLbitfield storageFlags, GLenum usage)
228 {
229 if (immutable) {
230 /* BufferStorage */
231 if (storageFlags & GL_CLIENT_STORAGE_BIT) {
232 if (storageFlags & GL_MAP_READ_BIT)
233 return PIPE_USAGE_STAGING;
234 else
235 return PIPE_USAGE_STREAM;
236 } else {
237 return PIPE_USAGE_DEFAULT;
238 }
239 }
240 else {
241 /* BufferData */
242 switch (usage) {
243 case GL_DYNAMIC_DRAW:
244 case GL_DYNAMIC_COPY:
245 return PIPE_USAGE_DYNAMIC;
246 case GL_STREAM_DRAW:
247 case GL_STREAM_COPY:
248 /* XXX: Remove this test and fall-through when we have PBO unpacking
249 * acceleration. Right now, PBO unpacking is done by the CPU, so we
250 * have to make sure CPU reads are fast.
251 */
252 if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
253 return PIPE_USAGE_STREAM;
254 }
255 /* fall through */
256 case GL_STATIC_READ:
257 case GL_DYNAMIC_READ:
258 case GL_STREAM_READ:
259 return PIPE_USAGE_STAGING;
260 case GL_STATIC_DRAW:
261 case GL_STATIC_COPY:
262 default:
263 return PIPE_USAGE_DEFAULT;
264 }
265 }
266 }
267
268
/**
 * Shared implementation of glBufferData / glBufferStorage /
 * glBufferStorageMemEXT: (re)allocate the pipe resource backing a buffer
 * object and optionally upload initial data.
 *
 * \param memObj  non-NULL for EXT_memory_object imports; the resource is
 *                then created from the memory object at \p offset.
 * \param offset  byte offset into \p memObj (ignored otherwise).
 * \return GL_TRUE on success, GL_FALSE if resource allocation failed.
 */
static ALWAYS_INLINE GLboolean
bufferobj_data(struct gl_context *ctx,
               GLenum target,
               GLsizeiptrARB size,
               const void *data,
               struct gl_memory_object *memObj,
               GLuint64 offset,
               GLenum usage,
               GLbitfield storageFlags,
               struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   struct st_memory_object *st_mem_obj = st_memory_object(memObj);

   /* Fast path: if an identically-sized buffer with the same usage and
    * storage flags already exists, reuse it instead of reallocating.
    */
   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          */
         pipe->buffer_subdata(pipe, st_obj->buffer,
                              PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                              0, size, data);
         return GL_TRUE;
      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
         /* No data: simply invalidate the old storage if the driver can. */
         pipe->invalidate_resource(pipe, st_obj->buffer);
         return GL_TRUE;
      }
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   /* Drop the old pipe resource (if any) before creating a new one. */
   pipe_resource_reference( &st_obj->buffer, NULL );

   const unsigned bindings = buffer_target_to_bind_flags(target);

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bindings);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bindings;
      buffer.usage =
         buffer_usage(target, st_obj->Base.Immutable, storageFlags, usage);
      buffer.flags = storage_flags_to_buffer_flags(storageFlags);
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      if (st_mem_obj) {
         /* EXT_memory_object: wrap imported memory at the given offset. */
         st_obj->buffer = screen->resource_from_memobj(screen, &buffer,
                                                       st_mem_obj->memory,
                                                       offset);
      }
      else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         /* AMD_pinned_memory: wrap the user's pointer directly. */
         st_obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         st_obj->buffer = screen->resource_create(screen, &buffer);

         if (st_obj->buffer && data)
            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      }

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }
   }

   /* The current buffer may be bound, so we have to revalidate all atoms that
    * might be using it.
    */
   if (st_obj->Base.UsageHistory & USAGE_ARRAY_BUFFER)
      ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
   /* if (st_obj->Base.UsageHistory & USAGE_ELEMENT_ARRAY_BUFFER) */
   /*    ctx->NewDriverState |= TODO: Handle indices as gallium state; */
   if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
   if (st_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

   return GL_TRUE;
}
375
376 /**
377 * Allocate space for and store data in a buffer object. Any data that was
378 * previously stored in the buffer object is lost. If data is NULL,
379 * memory will be allocated, but no copy will occur.
380 * Called via ctx->Driver.BufferData().
381 * \return GL_TRUE for success, GL_FALSE if out of memory
382 */
383 static GLboolean
384 st_bufferobj_data(struct gl_context *ctx,
385 GLenum target,
386 GLsizeiptrARB size,
387 const void *data,
388 GLenum usage,
389 GLbitfield storageFlags,
390 struct gl_buffer_object *obj)
391 {
392 return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj);
393 }
394
395 static GLboolean
396 st_bufferobj_data_mem(struct gl_context *ctx,
397 GLenum target,
398 GLsizeiptrARB size,
399 struct gl_memory_object *memObj,
400 GLuint64 offset,
401 GLenum usage,
402 struct gl_buffer_object *bufObj)
403 {
404 return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, 0, bufObj);
405 }
406
407 /**
408 * Called via glInvalidateBuffer(Sub)Data.
409 */
410 static void
411 st_bufferobj_invalidate(struct gl_context *ctx,
412 struct gl_buffer_object *obj,
413 GLintptr offset,
414 GLsizeiptr size)
415 {
416 struct st_context *st = st_context(ctx);
417 struct pipe_context *pipe = st->pipe;
418 struct st_buffer_object *st_obj = st_buffer_object(obj);
419
420 /* We ignore partial invalidates. */
421 if (offset != 0 || size != obj->Size)
422 return;
423
424 /* Nothing to invalidate. */
425 if (!st_obj->buffer)
426 return;
427
428 pipe->invalidate_resource(pipe, st_obj->buffer);
429 }
430
431
432 /**
433 * Convert GLbitfield of GL_MAP_x flags to gallium pipe_transfer_usage flags.
434 * \param wholeBuffer is the whole buffer being mapped?
435 */
436 enum pipe_transfer_usage
437 st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
438 {
439 enum pipe_transfer_usage flags = 0;
440
441 if (access & GL_MAP_WRITE_BIT)
442 flags |= PIPE_TRANSFER_WRITE;
443
444 if (access & GL_MAP_READ_BIT)
445 flags |= PIPE_TRANSFER_READ;
446
447 if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
448 flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;
449
450 if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
451 flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
452 }
453 else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
454 if (wholeBuffer)
455 flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
456 else
457 flags |= PIPE_TRANSFER_DISCARD_RANGE;
458 }
459
460 if (access & GL_MAP_UNSYNCHRONIZED_BIT)
461 flags |= PIPE_TRANSFER_UNSYNCHRONIZED;
462
463 if (access & GL_MAP_PERSISTENT_BIT)
464 flags |= PIPE_TRANSFER_PERSISTENT;
465
466 if (access & GL_MAP_COHERENT_BIT)
467 flags |= PIPE_TRANSFER_COHERENT;
468
469 /* ... other flags ...
470 */
471
472 if (access & MESA_MAP_NOWAIT_BIT)
473 flags |= PIPE_TRANSFER_DONTBLOCK;
474
475 return flags;
476 }
477
478
479 /**
480 * Called via glMapBufferRange().
481 */
482 static void *
483 st_bufferobj_map_range(struct gl_context *ctx,
484 GLintptr offset, GLsizeiptr length, GLbitfield access,
485 struct gl_buffer_object *obj,
486 gl_map_buffer_index index)
487 {
488 struct pipe_context *pipe = st_context(ctx)->pipe;
489 struct st_buffer_object *st_obj = st_buffer_object(obj);
490
491 assert(offset >= 0);
492 assert(length >= 0);
493 assert(offset < obj->Size);
494 assert(offset + length <= obj->Size);
495
496 const enum pipe_transfer_usage transfer_flags =
497 st_access_flags_to_transfer_flags(access,
498 offset == 0 && length == obj->Size);
499
500 obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
501 st_obj->buffer,
502 offset, length,
503 transfer_flags,
504 &st_obj->transfer[index]);
505 if (obj->Mappings[index].Pointer) {
506 obj->Mappings[index].Offset = offset;
507 obj->Mappings[index].Length = length;
508 obj->Mappings[index].AccessFlags = access;
509 }
510 else {
511 st_obj->transfer[index] = NULL;
512 }
513
514 return obj->Mappings[index].Pointer;
515 }
516
517
518 static void
519 st_bufferobj_flush_mapped_range(struct gl_context *ctx,
520 GLintptr offset, GLsizeiptr length,
521 struct gl_buffer_object *obj,
522 gl_map_buffer_index index)
523 {
524 struct pipe_context *pipe = st_context(ctx)->pipe;
525 struct st_buffer_object *st_obj = st_buffer_object(obj);
526
527 /* Subrange is relative to mapped range */
528 assert(offset >= 0);
529 assert(length >= 0);
530 assert(offset + length <= obj->Mappings[index].Length);
531 assert(obj->Mappings[index].Pointer);
532
533 if (!length)
534 return;
535
536 pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
537 obj->Mappings[index].Offset + offset,
538 length);
539 }
540
541
542 /**
543 * Called via glUnmapBufferARB().
544 */
545 static GLboolean
546 st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
547 gl_map_buffer_index index)
548 {
549 struct pipe_context *pipe = st_context(ctx)->pipe;
550 struct st_buffer_object *st_obj = st_buffer_object(obj);
551
552 if (obj->Mappings[index].Length)
553 pipe_buffer_unmap(pipe, st_obj->transfer[index]);
554
555 st_obj->transfer[index] = NULL;
556 obj->Mappings[index].Pointer = NULL;
557 obj->Mappings[index].Offset = 0;
558 obj->Mappings[index].Length = 0;
559 return GL_TRUE;
560 }
561
562
563 /**
564 * Called via glCopyBufferSubData().
565 */
566 static void
567 st_copy_buffer_subdata(struct gl_context *ctx,
568 struct gl_buffer_object *src,
569 struct gl_buffer_object *dst,
570 GLintptr readOffset, GLintptr writeOffset,
571 GLsizeiptr size)
572 {
573 struct pipe_context *pipe = st_context(ctx)->pipe;
574 struct st_buffer_object *srcObj = st_buffer_object(src);
575 struct st_buffer_object *dstObj = st_buffer_object(dst);
576 struct pipe_box box;
577
578 if (!size)
579 return;
580
581 /* buffer should not already be mapped */
582 assert(!_mesa_check_disallowed_mapping(src));
583 assert(!_mesa_check_disallowed_mapping(dst));
584
585 u_box_1d(readOffset, size, &box);
586
587 pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0,
588 srcObj->buffer, 0, &box);
589 }
590
591 /**
592 * Called via glClearBufferSubData().
593 */
594 static void
595 st_clear_buffer_subdata(struct gl_context *ctx,
596 GLintptr offset, GLsizeiptr size,
597 const void *clearValue,
598 GLsizeiptr clearValueSize,
599 struct gl_buffer_object *bufObj)
600 {
601 struct pipe_context *pipe = st_context(ctx)->pipe;
602 struct st_buffer_object *buf = st_buffer_object(bufObj);
603 static const char zeros[16] = {0};
604
605 if (!pipe->clear_buffer) {
606 _mesa_ClearBufferSubData_sw(ctx, offset, size,
607 clearValue, clearValueSize, bufObj);
608 return;
609 }
610
611 if (!clearValue)
612 clearValue = zeros;
613
614 pipe->clear_buffer(pipe, buf->buffer, offset, size,
615 clearValue, clearValueSize);
616 }
617
618 static void
619 st_bufferobj_page_commitment(struct gl_context *ctx,
620 struct gl_buffer_object *bufferObj,
621 GLintptr offset, GLsizeiptr size,
622 GLboolean commit)
623 {
624 struct pipe_context *pipe = st_context(ctx)->pipe;
625 struct st_buffer_object *buf = st_buffer_object(bufferObj);
626 struct pipe_box box;
627
628 u_box_1d(offset, size, &box);
629
630 if (!pipe->resource_commit(pipe, buf->buffer, 0, &box, commit)) {
631 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
632 return;
633 }
634 }
635
636 void
637 st_init_bufferobject_functions(struct pipe_screen *screen,
638 struct dd_function_table *functions)
639 {
640 functions->NewBufferObject = st_bufferobj_alloc;
641 functions->DeleteBuffer = st_bufferobj_free;
642 functions->BufferData = st_bufferobj_data;
643 functions->BufferDataMem = st_bufferobj_data_mem;
644 functions->BufferSubData = st_bufferobj_subdata;
645 functions->GetBufferSubData = st_bufferobj_get_subdata;
646 functions->MapBufferRange = st_bufferobj_map_range;
647 functions->FlushMappedBufferRange = st_bufferobj_flush_mapped_range;
648 functions->UnmapBuffer = st_bufferobj_unmap;
649 functions->CopyBufferSubData = st_copy_buffer_subdata;
650 functions->ClearBufferSubData = st_clear_buffer_subdata;
651 functions->BufferPageCommitment = st_bufferobj_page_commitment;
652
653 if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER))
654 functions->InvalidateBufferSubData = st_bufferobj_invalidate;
655 }