gallium: Fix uninitialized variable warning in compute test.
src/gallium/auxiliary/util/u_inlines.h
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *dst, unsigned count)
{
   p_atomic_set(&dst->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *src)
{
   return p_atomic_read(&src->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'dst' and 'src' may be NULL.
 * \return TRUE if the object's refcount hits zero, i.e. the object should
 *         be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *dst,
                         struct pipe_reference *src,
                         debug_reference_descriptor get_desc)
{
   if (dst != src) {
      /* bump the src.count first */
      if (src) {
         MAYBE_UNUSED int count = p_atomic_inc_return(&src->count);
         assert(count != 1); /* src had to be referenced */
         debug_reference(src, get_desc, 1);
      }

      if (dst) {
         int count = p_atomic_dec_return(&dst->count);
         assert(count != -1); /* dst had to be referenced */
         debug_reference(dst, get_desc, -1);
         if (!count)
            return true;
      }
   }

   return false;
}

static inline boolean
pipe_reference(struct pipe_reference *dst, struct pipe_reference *src)
{
   return pipe_reference_described(dst, src,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}
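
/*
 * Example: the typed helpers below (pipe_resource_reference() etc.) all
 * follow the same pattern around pipe_reference().  A minimal sketch for
 * a hypothetical refcounted object 'struct foo' with an embedded
 * 'struct pipe_reference reference' member and a foo_destroy() callback
 * (both names are assumptions, not part of this header):
 *
 *    static inline void
 *    foo_reference(struct foo **dst, struct foo *src)
 *    {
 *       struct foo *old_dst = *dst;
 *
 *       if (pipe_reference(old_dst ? &old_dst->reference : NULL,
 *                          src ? &src->reference : NULL))
 *          foo_destroy(old_dst);   // last reference dropped
 *       *dst = src;
 *    }
 */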

static inline void
pipe_surface_reference(struct pipe_surface **dst, struct pipe_surface *src)
{
   struct pipe_surface *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_dst->context->surface_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_surface_reference() but always sets the pointer to NULL
 * and takes an explicit context. The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a surface
 * that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   struct pipe_surface *old = *ptr;

   if (pipe_reference_described(&old->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      pipe->surface_destroy(pipe, old);
   *ptr = NULL;
}
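
/*
 * Example (a sketch): dropping the last reference to a surface whose
 * creating context may already be gone, using the current context 'ctx':
 *
 *    pipe_surface_release(ctx, &fb_state.cbufs[0]);
 *
 * versus transferring a reference within a single context:
 *
 *    pipe_surface_reference(&saved_surf, new_surf);
 *
 * 'ctx', 'fb_state', 'saved_surf' and 'new_surf' are assumed names, not
 * declared in this header.
 */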


static inline void
pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   struct pipe_resource *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)
                                debug_describe_resource)) {
      /* Avoid recursion, which would prevent inlining this function */
      do {
         struct pipe_resource *next = old_dst->next;

         old_dst->screen->resource_destroy(old_dst->screen, old_dst);
         old_dst = next;
      } while (pipe_reference_described(&old_dst->reference, NULL,
                                        (debug_reference_descriptor)
                                        debug_describe_resource));
   }
   *dst = src;
}

/**
 * Set *dst to \p src with proper reference counting.
 *
 * The caller must guarantee that \p src and *dst were created in
 * the same context (if they exist), and that this context is the
 * current one.
 */
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **dst,
                            struct pipe_sampler_view *src)
{
   struct pipe_sampler_view *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_dst->context->sampler_view_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_sampler_view_reference() but always sets the pointer to
 * NULL and takes the current context explicitly.
 *
 * If *ptr is non-NULL, it may refer to a view that was created in a different
 * context (however, that context must still be alive).
 */
static inline void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&old_view->reference, NULL,
                                (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **dst,
                         struct pipe_stream_output_target *src)
{
   struct pipe_stream_output_target *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)debug_describe_so_target))
      old_dst->context->stream_output_target_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   pipe_vertex_buffer_unreference(dst);
   if (!src->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   memcpy(dst, src, sizeof(*src));
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface *ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface *ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
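
/*
 * Example (a sketch; 'screen' is assumed to be a live pipe_screen):
 *
 *    struct pipe_resource *buf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_DEFAULT, 4096);
 *    ...
 *    pipe_resource_reference(&buf, NULL);   // drop the reference when done
 */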


static inline struct pipe_resource *
pipe_buffer_create_const0(struct pipe_screen *screen,
                          unsigned bind,
                          enum pipe_resource_usage usage,
                          unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = screen->get_param(screen, PIPE_CAP_CONSTBUF0_FLAGS);
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}


/**
 * Map a range of a resource.
 * \param offset start of region, in bytes
 * \param length size of region, in bytes
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   /* Returns NULL on failure, just like transfer_map() itself. */
   return pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
}
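
/*
 * Example (a sketch; 'pipe' and 'buf' are assumed to exist):
 *
 *    struct pipe_transfer *xfer;
 *    uint32_t *map = (uint32_t *)
 *       pipe_buffer_map_range(pipe, buf, 0, 4 * sizeof(uint32_t),
 *                             PIPE_TRANSFER_WRITE, &xfer);
 *    if (map) {
 *       map[0] = map[1] = map[2] = map[3] = 0;
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */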


/**
 * Map whole resource.
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}
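
/*
 * Example (a sketch): mapping with PIPE_TRANSFER_FLUSH_EXPLICIT and
 * flushing only the bytes actually written ('pipe', 'buf' and 'data'
 * are assumed to exist):
 *
 *    struct pipe_transfer *xfer;
 *    char *map = (char *)
 *       pipe_buffer_map(pipe, buf,
 *                       PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT,
 *                       &xfer);
 *    if (map) {
 *       memcpy(map + 16, data, 64);
 *       pipe_buffer_flush_mapped_range(pipe, xfer, 16, 64);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */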

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_TRANSFER_WRITE |
                         PIPE_TRANSFER_UNSYNCHRONIZED),
                        offset, size, data);
}
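
/*
 * Example (a sketch): sub-allocating fresh ranges from a streaming buffer
 * so each write lands in bytes the GPU has never seen, avoiding a
 * synchronization stall ('pipe', 'buf' and 'verts' are assumed names):
 *
 *    static unsigned stream_offset;   // hypothetical allocator state
 *
 *    pipe_buffer_write_nooverlap(pipe, buf, stream_offset,
 *                                sizeof(verts), verts);
 *    stream_offset += sizeof(verts);
 */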


/**
 * Create a new resource and immediately put data into it.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}


/**
 * Map a resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}
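
/*
 * Example (a sketch): reading back a 4x4 texel region of mip level 0,
 * layer 0 ('ctx', 'tex' and process_row() are assumed; the row pitch
 * comes from the returned transfer):
 *
 *    struct pipe_transfer *xfer;
 *    char *map = (char *)
 *       pipe_transfer_map(ctx, tex, 0, 0, PIPE_TRANSFER_READ,
 *                         0, 0, 4, 4, &xfer);
 *    if (map) {
 *       for (unsigned row = 0; row < 4; row++)
 *          process_row(map + row * xfer->stride);   // hypothetical helper
 *       pipe_transfer_unmap(ctx, xfer);
 *    }
 */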


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

static inline void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline enum tgsi_texture_type
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}
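
/*
 * Example: util_pipe_tex_to_tgsi_tex(PIPE_TEXTURE_2D, 4) yields
 * TGSI_TEXTURE_2D_MSAA, while the same target with nr_samples <= 1
 * yields TGSI_TEXTURE_2D.
 */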


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}
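
/*
 * Example: for a PIPE_TEXTURE_3D resource with depth0 == 32,
 * util_max_layer(r, 0) is 31 and util_max_layer(r, 2) is 7, since each
 * mip level halves the depth; for array textures the layer count is the
 * same at every level.
 */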

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_num_layers(tex, level);
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */