/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static INLINE void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

static INLINE boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The object previously pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static INLINE boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

static INLINE boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)debug_describe_reference);
}

static INLINE void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

static INLINE void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)debug_describe_resource))
      old_tex->screen->resource_destroy(old_tex->screen, old_tex);
   *ptr = tex;
}
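
/*
 * Example (illustrative sketch, not part of the original header): the
 * canonical pattern for holding and dropping a resource reference.
 * 'res' is assumed to be a resource obtained elsewhere, e.g. from
 * screen->resource_create().
 *
 *    struct pipe_resource *held = NULL;
 *
 *    pipe_resource_reference(&held, res);    // take a reference
 *    ...use 'held'...
 *    pipe_resource_reference(&held, NULL);   // drop it; the resource is
 *                                            // destroyed if this was the
 *                                            // last reference
 */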

static INLINE void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr,
                            struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

/**
 * Similar to pipe_sampler_view_reference() but always sets the pointer to
 * NULL and takes an explicit context.  Passing an explicit context is a
 * workaround for a dangling context pointer problem when textures are
 * shared by multiple contexts.  XXX fix this someday.
 */
static INLINE void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (*ptr && (*ptr)->context != ctx) {
      debug_printf_once(("context mis-match in pipe_sampler_view_release()\n"));
   }
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}


static INLINE void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                                (debug_reference_descriptor)debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

static INLINE void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface *ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer,
                   unsigned flags)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->usage = flags;
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static INLINE void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface *ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer,
                  unsigned flags)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer, flags);
}

/* Return true if the surfaces are equal. */
static INLINE boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */

static INLINE struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   unsigned usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
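
/*
 * Example (illustrative sketch): creating a 64 KB vertex buffer.  'screen'
 * is assumed to be a valid pipe_screen obtained from the driver, and the
 * bind/usage values are just one plausible combination.
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen,
 *                          PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_STATIC,
 *                          64 * 1024);
 *    if (!vbuf) {
 *       // allocation failed; caller handles the error
 *    }
 */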


static INLINE struct pipe_resource *
pipe_user_buffer_create(struct pipe_screen *screen, void *ptr, unsigned size,
                        unsigned usage)
{
   return screen->user_buffer_create(screen, ptr, size, usage);
}

static INLINE void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned usage,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   *transfer = pipe->get_transfer(pipe,
                                  buffer,
                                  0,
                                  usage,
                                  &box);

   if (*transfer == NULL)
      return NULL;

   map = pipe->transfer_map(pipe, *transfer);
   if (map == NULL) {
      pipe->transfer_destroy(pipe, *transfer);
      *transfer = NULL;
      return NULL;
   }

   return map;
}


static INLINE void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned usage,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, usage,
                                transfer);
}


static INLINE void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   if (transfer) {
      pipe->transfer_unmap(pipe, transfer);
      pipe->transfer_destroy(pipe, transfer);
   }
}
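
/*
 * Example (illustrative sketch): a typical map/modify/unmap cycle on a
 * buffer.  'pipe' and 'buf' are assumed to come from the caller, and the
 * 256-byte range is arbitrary (it must fit within the buffer).
 *
 *    struct pipe_transfer *xfer;
 *    float *data = (float *)
 *       pipe_buffer_map_range(pipe, buf, 0, 256,
 *                             PIPE_TRANSFER_WRITE |
 *                             PIPE_TRANSFER_DISCARD_RANGE,
 *                             &xfer);
 *    if (data) {
 *       data[0] = 1.0f;
 *       pipe_buffer_unmap(pipe, xfer);   // unmaps and destroys the transfer
 *    }
 */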

static INLINE void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= offset);
   assert(offset + length <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

static INLINE void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   struct pipe_box box;
   unsigned usage = PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == buf->width0) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   } else {
      usage |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               0,
                               usage,
                               &box,
                               data,
                               size,
                               0);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static INLINE void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   struct pipe_box box;

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               0,
                               (PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_UNSYNCHRONIZED),
                               &box,
                               data,
                               0, 0);
}
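
/*
 * Example (illustrative sketch): append-style uploads into a streaming
 * buffer.  Because each write lands in a fresh, never-written range, the
 * unsynchronized path avoids stalling on the GPU.  'pipe', 'buf', 'data'
 * and 'len' are assumed to come from the caller, and 'buf_offset' is a
 * hypothetical cursor tracking the next free byte.
 *
 *    pipe_buffer_write_nooverlap(pipe, buf, buf_offset, len, data);
 *    buf_offset += len;   // never rewrite a previously written range
 */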

static INLINE void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);

   if (map)
      memcpy(data, map, size);

   pipe_buffer_unmap(pipe, src_transfer);
}

static INLINE struct pipe_transfer *
pipe_get_transfer(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  enum pipe_transfer_usage usage,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->get_transfer(context,
                                resource,
                                level,
                                usage,
                                &box);
}

static INLINE void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_transfer *transfer)
{
   return context->transfer_map(context, transfer);
}

static INLINE void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}


static INLINE void
pipe_transfer_destroy(struct pipe_context *context,
                      struct pipe_transfer *transfer)
{
   context->transfer_destroy(context, transfer);
}
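
/*
 * Example (illustrative sketch): reading back a 4x4 region of level 0,
 * layer 0 of a 2D texture with the transfer helpers above.  'pipe' and
 * 'tex' are assumed to come from the caller.
 *
 *    struct pipe_transfer *xfer =
 *       pipe_get_transfer(pipe, tex, 0, 0, PIPE_TRANSFER_READ,
 *                         0, 0, 4, 4);
 *    if (xfer) {
 *       void *map = pipe_transfer_map(pipe, xfer);
 *       if (map) {
 *          // rows within the mapping are xfer->stride bytes apart
 *          pipe_transfer_unmap(pipe, xfer);
 *       }
 *       pipe_transfer_destroy(pipe, xfer);
 *    }
 */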


static INLINE boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

/**
 * Copy an array of pipe_vertex_buffer structures, properly referencing
 * the pipe_vertex_buffer::buffer member of each element.
 *
 * \sa util_copy_framebuffer_state
 */
static INLINE void
util_copy_vertex_buffers(struct pipe_vertex_buffer *dst,
                         unsigned *dst_count,
                         const struct pipe_vertex_buffer *src,
                         unsigned src_count)
{
   unsigned i;

   /* Reference the buffers of 'src' in 'dst'. */
   for (i = 0; i < src_count; i++) {
      pipe_resource_reference(&dst[i].buffer, src[i].buffer);
   }
   /* Unreference the rest of the buffers in 'dst'. */
   for (; i < *dst_count; i++) {
      pipe_resource_reference(&dst[i].buffer, NULL);
   }

   /* Update the size of 'dst' and copy over the other members
    * of pipe_vertex_buffer.
    */
   *dst_count = src_count;
   memcpy(dst, src, src_count * sizeof(struct pipe_vertex_buffer));
}
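
/*
 * Example (illustrative sketch): a driver's set_vertex_buffers hook could
 * use the helper to keep its saved copy of the bindings refcounted.  The
 * 'my_context' type and its vertex_buffers/num_vertex_buffers fields are
 * hypothetical.
 *
 *    static void
 *    my_set_vertex_buffers(struct pipe_context *pipe, unsigned count,
 *                          const struct pipe_vertex_buffer *buffers)
 *    {
 *       struct my_context *mc = (struct my_context *) pipe;
 *       util_copy_vertex_buffers(mc->vertex_buffers,
 *                                &mc->num_vertex_buffers,
 *                                buffers, count);
 *    }
 */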

static INLINE float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer
    * stage.
    */
   return (state->gl_rasterization_rules &&
           !state->point_quad_rasterization &&
           !state->point_smooth &&
           !state->multisample) ? 1.0f : 0.0f;
}

static INLINE void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      assert(0);
   }
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */