/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

static inline boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)debug_describe_reference);
}
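
/*
 * The wrappers below (pipe_surface_reference(), pipe_resource_reference(),
 * etc.) all follow the same pattern, and a driver-private reference-counted
 * type can reuse it as well.  A minimal sketch, assuming a hypothetical
 * 'struct my_object' whose destructor is 'my_object_destroy()':
 *
 *    static inline void
 *    my_object_reference(struct my_object **ptr, struct my_object *obj)
 *    {
 *       struct my_object *old = *ptr;
 *
 *       if (pipe_reference(&(*ptr)->reference, &obj->reference))
 *          my_object_destroy(old);
 *       *ptr = obj;
 *    }
 */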

static inline void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

/**
 * Similar to pipe_surface_reference(), but always sets the pointer to NULL
 * and takes an explicit context.  The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a surface
 * that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_surface))
      pipe->surface_destroy(pipe, *ptr);
   *ptr = NULL;
}


static inline void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)debug_describe_resource))
      old_tex->screen->resource_destroy(old_tex->screen, old_tex);
   *ptr = tex;
}

static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr, struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

/**
 * Similar to pipe_sampler_view_reference(), but always sets the pointer to
 * NULL and takes an explicit context.  Passing an explicit context is a
 * work-around for fixing a dangling context pointer problem when textures
 * are shared by multiple contexts.  XXX fix this someday.
 */
static inline void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;
   if (*ptr && (*ptr)->context != ctx) {
      debug_printf_once(("context mis-match in pipe_sampler_view_release()\n"));
   }
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}

static inline void
pipe_image_view_reference(struct pipe_image_view **ptr, struct pipe_image_view *view)
{
   struct pipe_image_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)debug_describe_image_view))
      old_view->context->image_view_destroy(old_view->context, old_view);
   *ptr = view;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                                (debug_reference_descriptor)debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = 0;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  bitmask of PIPE_USAGE_x flags
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   unsigned usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
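
/*
 * Example (illustrative sketch; 'screen' is a caller-provided pipe_screen
 * and the flags/size are arbitrary):
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen,
 *                          PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_STREAM,
 *                          64 * 1024);
 *    ...
 *    pipe_resource_reference(&vbuf, NULL);   (release when no longer needed)
 */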


/**
 * Map a range of a resource.
 * \param offset  start of region, in bytes
 * \param length  size of region, in bytes
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   map = pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
   if (!map) {
      return NULL;
   }

   return map;
}
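
/*
 * Example (illustrative sketch; 'pipe' and 'buf' are caller-provided, the
 * offset and length are arbitrary):
 *
 *    struct pipe_transfer *xfer;
 *    uint32_t *data = pipe_buffer_map_range(pipe, buf,
 *                                           16, 4 * sizeof(uint32_t),
 *                                           PIPE_TRANSFER_WRITE |
 *                                           PIPE_TRANSFER_DISCARD_RANGE,
 *                                           &xfer);
 *    if (data) {
 *       data[0] = 1; data[1] = 2; data[2] = 3; data[3] = 4;
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */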


/**
 * Map whole resource.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}
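
/*
 * Typical use together with PIPE_TRANSFER_FLUSH_EXPLICIT (illustrative
 * sketch; 'pipe', 'buf' and 'data' are caller-provided):
 *
 *    struct pipe_transfer *xfer;
 *    char *map = pipe_buffer_map(pipe, buf,
 *                                PIPE_TRANSFER_WRITE |
 *                                PIPE_TRANSFER_FLUSH_EXPLICIT,
 *                                &xfer);
 *    if (map) {
 *       memcpy(map + 256, data, 64);
 *       pipe_buffer_flush_mapped_range(pipe, xfer, 256, 64);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 *
 * Note that the offset passed to pipe_buffer_flush_mapped_range() is
 * relative to the start of the buffer, not the start of the mapped range.
 */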

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   struct pipe_box box;
   unsigned access = PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == buf->width0) {
      access |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   } else {
      access |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               0,
                               access,
                               &box,
                               data,
                               size,
                               0);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   struct pipe_box box;

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               0,
                               (PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_UNSYNCHRONIZED),
                               &box,
                               data,
                               0, 0);
}
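
/*
 * Intended usage pattern (illustrative sketch): only write ranges that have
 * never been written before, e.g. a simple append-only uploader where
 * 'stream_offset' is a hypothetical cursor that only moves forward:
 *
 *    pipe_buffer_write_nooverlap(pipe, buf, stream_offset, size, data);
 *    stream_offset += size;
 */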


/**
 * Create a new resource and immediately put data into it.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  bitmask of PIPE_USAGE_x flags
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             unsigned usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}
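
/*
 * Example (illustrative sketch): read back a small, CPU-visible result
 * buffer ('pipe' and 'buf' are caller-provided):
 *
 *    uint32_t results[4];
 *    pipe_buffer_read(pipe, buf, 0, sizeof(results), results);
 */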


/**
 * Map a resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}
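
/*
 * Example (illustrative sketch): read back a w x h region of a 2D texture,
 * honouring the per-row stride reported by the transfer.  'pipe', 'tex',
 * 'x', 'y', 'w', 'h', 'dst', 'dst_stride' and 'bytes_per_pixel' are all
 * caller-provided / hypothetical:
 *
 *    struct pipe_transfer *xfer;
 *    const ubyte *src = pipe_transfer_map(pipe, tex, 0, 0,
 *                                         PIPE_TRANSFER_READ,
 *                                         x, y, w, h, &xfer);
 *    if (src) {
 *       for (unsigned row = 0; row < h; row++)
 *          memcpy(dst + row * dst_stride,
 *                 src + row * xfer->stride,
 *                 w * bytes_per_pixel);
 *       pipe_transfer_unmap(pipe, xfer);
 *    }
 */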


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

static inline void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}
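
/*
 * Example (illustrative sketch): bind 'buf' as constant buffer slot 0 of
 * the fragment shader, then unbind it again:
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, buf);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */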


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode  one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline unsigned
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_pipe_shader_from_tgsi_processor(unsigned processor)
{
   switch (processor) {
   case TGSI_PROCESSOR_VERTEX:
      return PIPE_SHADER_VERTEX;
   case TGSI_PROCESSOR_TESS_CTRL:
      return PIPE_SHADER_TESS_CTRL;
   case TGSI_PROCESSOR_TESS_EVAL:
      return PIPE_SHADER_TESS_EVAL;
   case TGSI_PROCESSOR_GEOMETRY:
      return PIPE_SHADER_GEOMETRY;
   case TGSI_PROCESSOR_FRAGMENT:
      return PIPE_SHADER_FRAGMENT;
   case TGSI_PROCESSOR_COMPUTE:
      return PIPE_SHADER_COMPUTE;
   default:
      assert(0);
      return PIPE_SHADER_VERTEX;
   }
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */