util: add a resource wrapper to get resource samples
src/gallium/auxiliary/util/u_inlines.h
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *dst, unsigned count)
{
   p_atomic_set(&dst->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *src)
{
   return p_atomic_read(&src->count) != 0;
}

/**
 * Update reference counting.
 * The object previously pointed to by 'dst', if any, is unreferenced first.
 * Both 'dst' and 'src' may be NULL.
 * \return TRUE if the old object's refcount hit zero and the caller should
 *         destroy it.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *dst,
                         struct pipe_reference *src,
                         debug_reference_descriptor get_desc)
{
   if (dst != src) {
      /* bump the src.count first */
      if (src) {
         ASSERTED int count = p_atomic_inc_return(&src->count);
         assert(count != 1); /* src had to be referenced */
         debug_reference(src, get_desc, 1);
      }

      if (dst) {
         int count = p_atomic_dec_return(&dst->count);
         assert(count != -1); /* dst had to be referenced */
         debug_reference(dst, get_desc, -1);
         if (!count)
            return true;
      }
   }

   return false;
}

static inline boolean
pipe_reference(struct pipe_reference *dst, struct pipe_reference *src)
{
   return pipe_reference_described(dst, src,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}
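
/*
 * Illustrative sketch (not part of this header): the usual way to build a
 * reference-counted "set pointer" helper on top of pipe_reference().
 * struct my_object and my_object_destroy() are hypothetical names.
 *
 *    static inline void
 *    my_object_reference(struct my_object **dst, struct my_object *src)
 *    {
 *       if (pipe_reference(*dst ? &(*dst)->reference : NULL,
 *                          src ? &src->reference : NULL))
 *          my_object_destroy(*dst);
 *       *dst = src;
 *    }
 */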

static inline void
pipe_surface_reference(struct pipe_surface **dst, struct pipe_surface *src)
{
   struct pipe_surface *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_dst->context->surface_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_surface_reference(), but it always sets the pointer to
 * NULL and takes an explicit context. The explicit context avoids the
 * problem of using a deleted context's surface_destroy() method when
 * freeing a surface that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   struct pipe_surface *old = *ptr;

   if (pipe_reference_described(&old->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      pipe->surface_destroy(pipe, old);
   *ptr = NULL;
}

static inline void
pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   struct pipe_resource *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_resource)) {
      /* Unreference the ->next chain iteratively instead of recursing;
       * recursion would prevent inlining this function.
       */
      do {
         struct pipe_resource *next = old_dst->next;

         old_dst->screen->resource_destroy(old_dst->screen, old_dst);
         old_dst = next;
      } while (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                        NULL,
                                        (debug_reference_descriptor)
                                        debug_describe_resource));
   }
   *dst = src;
}
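
/*
 * Illustrative usage (tex and some_texture are hypothetical): taking and
 * dropping a resource reference. Assigning NULL drops the reference and
 * destroys the resource once the last reference is gone.
 *
 *    struct pipe_resource *tex = NULL;
 *    pipe_resource_reference(&tex, some_texture);  (refcount + 1)
 *    ...
 *    pipe_resource_reference(&tex, NULL);          (unref; tex is now NULL)
 */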

/**
 * Same as pipe_surface_release(), but used when the pipe_context no longer
 * exists.
 */
static inline void
pipe_surface_release_no_context(struct pipe_surface **ptr)
{
   struct pipe_surface *surf = *ptr;

   if (pipe_reference_described(&surf->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface)) {
      /* trivially destroy pipe_surface */
      pipe_resource_reference(&surf->texture, NULL);
      free(surf);
   }
   *ptr = NULL;
}

/**
 * Set *dst to \p src with proper reference counting.
 *
 * The caller must guarantee that \p src and *dst were created in the same
 * context (if they exist), and that that context is the current context.
 */
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **dst,
                            struct pipe_sampler_view *src)
{
   struct pipe_sampler_view *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_dst->context->sampler_view_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **dst,
                         struct pipe_stream_output_target *src)
{
   struct pipe_stream_output_target *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)debug_describe_so_target))
      old_dst->context->stream_output_target_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   pipe_vertex_buffer_unreference(dst);
   /* Take a reference through dst first; the memcpy below then copies the
    * identical resource pointer along with the other fields.
    */
   if (!src->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   memcpy(dst, src, sizeof(*src));
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
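
/*
 * Illustrative sketch (screen and the sizes are hypothetical): creating a
 * 4 KB streaming vertex buffer.
 *
 *    struct pipe_resource *buf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_STREAM, 4096);
 */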


/**
 * Like pipe_buffer_create(), but sets the flags the screen requires for
 * constant buffer 0 (PIPE_CAP_CONSTBUF0_FLAGS).
 */
static inline struct pipe_resource *
pipe_buffer_create_const0(struct pipe_screen *screen,
                          unsigned bind,
                          enum pipe_resource_usage usage,
                          unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = screen->get_param(screen, PIPE_CAP_CONSTBUF0_FLAGS);
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}


/**
 * Map a range of a resource.
 * \param offset start of region, in bytes
 * \param length size of region, in bytes
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   /* May return NULL if the mapping fails. */
   return pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
}


/**
 * Map a whole resource.
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}
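
/*
 * Illustrative sketch (pipe and buf are hypothetical): filling a buffer
 * through a CPU mapping.
 *
 *    struct pipe_transfer *xfer;
 *    uint32_t *p = pipe_buffer_map(pipe, buf, PIPE_TRANSFER_WRITE, &xfer);
 *    if (p) {
 *       p[0] = 0xdeadbeef;
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */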

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_TRANSFER_WRITE |
                         PIPE_TRANSFER_UNSYNCHRONIZED),
                        offset, size, data);
}


/**
 * Create a new resource and immediately put data into it.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}
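
/*
 * Illustrative usage (pipe and buf are hypothetical): reading the first
 * 16 bytes of a buffer back into local storage.
 *
 *    uint8_t data[16];
 *    pipe_buffer_read(pipe, buf, 0, sizeof(data), data);
 */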


/**
 * Map a resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

static inline void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}
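
/*
 * Illustrative sketch (ctx, tex, w, h and read_row are hypothetical):
 * mapping level 0, layer 0 of a 2D texture for reading, stepping row by
 * row via the transfer's stride.
 *
 *    struct pipe_transfer *xfer;
 *    const uint8_t *texels =
 *       pipe_transfer_map(ctx, tex, 0, 0, PIPE_TRANSFER_READ,
 *                         0, 0, w, h, &xfer);
 *    if (texels) {
 *       for (unsigned row = 0; row < h; row++)
 *          read_row(texels + row * xfer->stride, w);
 *       pipe_transfer_unmap(ctx, xfer);
 *    }
 */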

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}
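
/*
 * Illustrative usage (pipe and constbuf are hypothetical): binding a
 * buffer to constant-buffer slot 0 of the fragment shader, then unbinding
 * it again.
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, constbuf);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */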


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline enum tgsi_texture_type
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}
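
/*
 * Illustrative check: a multisampled 2D texture maps to the MSAA sampler
 * type, while a single-sampled one maps to the plain 2D type.
 *
 *    assert(util_pipe_tex_to_tgsi_tex(PIPE_TEXTURE_2D, 4) ==
 *           TGSI_TEXTURE_2D_MSAA);
 *    assert(util_pipe_tex_to_tgsi_tex(PIPE_TEXTURE_2D, 0) ==
 *           TGSI_TEXTURE_2D);
 */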


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_shader_buffer(struct pipe_shader_buffer *dst,
                        const struct pipe_shader_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->shader_access = src->shader_access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      dst->shader_access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

/**
 * Return the index of the last layer of the given mip level, i.e. the
 * number of layers minus one.
 */
static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}
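
/*
 * Illustrative usage (tex, level and process_layer are hypothetical):
 * visiting every layer of one mip level.
 *
 *    for (unsigned layer = 0; layer < util_num_layers(tex, level); layer++)
 *       process_layer(tex, level, layer);
 */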

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_num_layers(tex, level);
}

static inline bool
util_logicop_reads_dest(enum pipe_logicop op)
{
   switch (op) {
   case PIPE_LOGICOP_NOR:
   case PIPE_LOGICOP_AND_INVERTED:
   case PIPE_LOGICOP_AND_REVERSE:
   case PIPE_LOGICOP_INVERT:
   case PIPE_LOGICOP_XOR:
   case PIPE_LOGICOP_NAND:
   case PIPE_LOGICOP_AND:
   case PIPE_LOGICOP_EQUIV:
   case PIPE_LOGICOP_NOOP:
   case PIPE_LOGICOP_OR_INVERTED:
   case PIPE_LOGICOP_OR_REVERSE:
   case PIPE_LOGICOP_OR:
      return true;
   case PIPE_LOGICOP_CLEAR:
   case PIPE_LOGICOP_COPY_INVERTED:
   case PIPE_LOGICOP_COPY:
   case PIPE_LOGICOP_SET:
      return false;
   }
   unreachable("bad logicop");
}

static inline struct pipe_context *
pipe_create_multimedia_context(struct pipe_screen *screen)
{
   unsigned flags = 0;

   if (!screen->get_param(screen, PIPE_CAP_GRAPHICS))
      flags |= PIPE_CONTEXT_COMPUTE_ONLY;

   return screen->context_create(screen, NULL, flags);
}

/**
 * Return the sample count of a resource, treating an unset count (0) as 1.
 */
static inline unsigned
util_res_sample_count(struct pipe_resource *res)
{
   return res->nr_samples > 0 ? res->nr_samples : 1;
}
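
/*
 * Illustrative usage (tex is hypothetical): nr_samples == 0 and
 * nr_samples == 1 both mean a single-sampled resource, so the wrapper
 * always returns a count that is safe to loop or divide by.
 *
 *    unsigned samples = util_res_sample_count(tex);
 *    assert(samples >= 1);
 */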

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */