gallium/util: Break recursion in pipe_resource_reference
[mesa.git] src/gallium/auxiliary/util/u_inlines.h
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The old object pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

static inline boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)debug_describe_reference);
}

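/*
 * Typical usage pattern (illustrative sketch only; my_thing and
 * my_thing_destroy are hypothetical and not part of this header):
 *
 *    void my_thing_reference(struct my_thing **ptr, struct my_thing *t)
 *    {
 *       if (pipe_reference(&(*ptr)->reference, &t->reference))
 *          my_thing_destroy(*ptr);   // old object's refcount hit zero
 *       *ptr = t;
 *    }
 *
 * The wrappers below follow this pattern for surfaces, resources,
 * sampler views and stream output targets.
 */
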
static inline void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

/**
 * Similar to pipe_surface_reference() but always sets the pointer to NULL
 * and takes an explicit context. The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a surface
 * that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_surface))
      pipe->surface_destroy(pipe, *ptr);
   *ptr = NULL;
}


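/**
 * Set *ptr to point to tex, with proper reference counting.
 *
 * Resources can be chained through their 'next' pointer; when the old
 * resource's refcount drops to zero, the whole chain is torn down with an
 * iterative loop instead of letting resource_destroy() recurse back into
 * this function, so the function stays cheap to inline (see the commit
 * "gallium/util: Break recursion in pipe_resource_reference").
 */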
static inline void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)debug_describe_resource)) {
      /* Avoid recursion, which would prevent inlining this function */
      do {
         struct pipe_resource *next = old_tex->next;

         old_tex->screen->resource_destroy(old_tex->screen, old_tex);
         old_tex = next;
      } while (pipe_reference_described(&old_tex->reference, NULL,
                                        (debug_reference_descriptor)debug_describe_resource));
   }
   *ptr = tex;
}

static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr, struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

/**
 * Similar to pipe_sampler_view_reference() but always sets the pointer to
 * NULL and takes an explicit context. Passing an explicit context is a
 * work-around for fixing a dangling context pointer problem when textures
 * are shared by multiple contexts. XXX fix this someday.
 */
static inline void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;
   if (*ptr && (*ptr)->context != ctx) {
      debug_printf_once(("context mis-match in pipe_sampler_view_release()\n"));
   }
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                                (debug_reference_descriptor)debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

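/**
 * Release any reference held by the vertex buffer slot 'dst'.
 * User (CPU) buffers are not reference counted, so their pointer is simply
 * cleared; driver resources are unreferenced via pipe_resource_reference().
 */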
static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   pipe_vertex_buffer_unreference(dst);
   if (!src->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   memcpy(dst, src, sizeof(*src));
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create( struct pipe_screen *screen,
                    unsigned bind,
                    enum pipe_resource_usage usage,
                    unsigned size )
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}

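/*
 * Example (illustrative):
 *
 *    struct pipe_resource *buf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_DEFAULT, 4 * 1024);
 */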

/**
 * Map a range of a resource.
 * \param offset start of region, in bytes
 * \param length size of region, in bytes
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   map = pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
   if (!map) {
      return NULL;
   }

   return map;
}


/**
 * Map the whole resource.
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match the old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

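/**
 * Write 'size' bytes from 'data' into the buffer at the given byte offset.
 */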
static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_TRANSFER_WRITE |
                         PIPE_TRANSFER_UNSYNCHRONIZED),
                        offset, size, data);
}


/**
 * Create a new resource and immediately put data into it.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}


/**
 * Map a resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

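/*
 * Example (illustrative): read back the base mip level of a 2D texture.
 *
 *    struct pipe_transfer *t;
 *    void *data = pipe_transfer_map(ctx, tex, 0, 0, PIPE_TRANSFER_READ,
 *                                   0, 0, tex->width0, tex->height0, &t);
 *    if (data) {
 *       ...read the data, using t->stride as the row pitch...
 *       pipe_transfer_unmap(ctx, t);
 *    }
 */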

/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

static inline void
pipe_transfer_unmap( struct pipe_context *context,
                     struct pipe_transfer *transfer )
{
   context->transfer_unmap( context, transfer );
}

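/**
 * Bind the whole of 'buf' (or unbind, if 'buf' is NULL) as the constant
 * buffer at the given slot for the given shader stage.
 */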
static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

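/**
 * Return the minimum point size allowed by the rasterizer state:
 * 1.0 for ordinary aliased points (no point sprites, no smoothing, no
 * multisampling), 0.0 (i.e. no lower clamp) otherwise.
 */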
static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline unsigned
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_max_layer(tex, level) + 1;
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */